-rw-r--r--  arch/x86/cpu/Makefile             |   2
-rw-r--r--  arch/x86/cpu/call64.S             |  93
-rw-r--r--  arch/x86/cpu/cpu.c                | 144
-rw-r--r--  arch/x86/include/asm/bootm.h      |  16
-rw-r--r--  arch/x86/include/asm/cpu.h        |  55
-rw-r--r--  arch/x86/include/asm/msr-index.h  | 108
-rw-r--r--  arch/x86/include/asm/zimage.h     |   4
-rw-r--r--  arch/x86/lib/bootm.c              |  56
-rw-r--r--  arch/x86/lib/physmem.c            |  33
-rw-r--r--  arch/x86/lib/zimage.c             |  47
-rw-r--r--  common/bootm.c                    |   3
-rw-r--r--  common/image-fit.c                |   3
-rw-r--r--  common/image.c                    |   1
-rw-r--r--  include/configs/coreboot.h        |   4
-rw-r--r--  include/image.h                   |   1
15 files changed, 483 insertions, 87 deletions
diff --git a/arch/x86/cpu/Makefile b/arch/x86/cpu/Makefile
index e7bb3e3..9d38ef7 100644
--- a/arch/x86/cpu/Makefile
+++ b/arch/x86/cpu/Makefile
@@ -10,4 +10,4 @@ extra-y = start.o
 
 obj-$(CONFIG_X86_RESET_VECTOR) += resetvec.o start16.o
 
-obj-y += interrupts.o cpu.o
+obj-y += interrupts.o cpu.o call64.o
diff --git a/arch/x86/cpu/call64.S b/arch/x86/cpu/call64.S
new file mode 100644
index 0000000..74dd5a8
--- /dev/null
+++ b/arch/x86/cpu/call64.S
@@ -0,0 +1,93 @@
+/*
+ * (C) Copyright 2014 Google, Inc
+ * Copyright (C) 1991, 1992, 1993  Linus Torvalds
+ *
+ * Parts of this copied from Linux arch/x86/boot/compressed/head_64.S
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#include <asm/global_data.h>
+#include <asm/msr-index.h>
+#include <asm/processor-flags.h>
+
+.code32
+.globl cpu_call64
+cpu_call64:
+	/*
+	 * cpu_call64(ulong pgtable, ulong setup_base, ulong target)
+	 *
+	 * eax - pgtable
+	 * edx - setup_base
+	 * ecx - target
+	 */
+	cli
+	push	%ecx		/* arg2 = target */
+	push	%edx		/* arg1 = setup_base */
+	mov	%eax, %ebx
+
+	/* Load new GDT with the 64bit segments using 32bit descriptor */
+	leal	gdt, %eax
+	movl	%eax, gdt+2
+	lgdt	gdt
+
+	/* Enable PAE mode */
+	movl	$(X86_CR4_PAE), %eax
+	movl	%eax, %cr4
+
+	/* Enable the boot page tables */
+	leal	(%ebx), %eax
+	movl	%eax, %cr3
+
+	/* Enable Long mode in EFER (Extended Feature Enable Register) */
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	btsl	$_EFER_LME, %eax
+	wrmsr
+
+	/* After gdt is loaded */
+	xorl	%eax, %eax
+	lldt	%ax
+	movl	$0x20, %eax
+	ltr	%ax
+
+	/*
+	 * Setup for the jump to 64bit mode
+	 *
+	 * When the jump is performed we will be in long mode but
+	 * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
+	 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
+	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
+	 * We place all of the values on our mini stack so lret can
+	 * be used to perform that far jump. See the gdt below.
+	 */
+	pop	%esi			/* setup_base */
+
+	pushl	$0x10
+	leal	lret_target, %eax
+	pushl	%eax
+
+	/* Enter paged protected Mode, activating Long Mode */
+	movl	$(X86_CR0_PG | X86_CR0_PE), %eax
+	movl	%eax, %cr0
+
+	/* Jump from 32bit compatibility mode into 64bit mode. */
+	lret
+
+code64:
+lret_target:
+	pop	%eax		/* target */
+	mov	%eax, %eax	/* Clear bits 63:32 */
+	jmp	*%eax		/* Jump to the 64-bit target */
+
+	.data
+gdt:
+	.word	gdt_end - gdt
+	.long	gdt
+	.word	0
+	.quad	0x0000000000000000	/* NULL descriptor */
+	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
+	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
+	.quad	0x0080890000000000	/* TS descriptor */
+	.quad	0x0000000000000000	/* TS continued */
+gdt_end:
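The far return at the end of cpu_call64() only lands in 64-bit mode because the __KERNEL_CS entry in the gdt above (0x00af9a000000ffff) has the L bit set and D clear. As a quick sanity check of those descriptor constants, here is a small standalone C sketch (not part of the patch; decode_descriptor() is only an illustrative helper):

#include <stdint.h>
#include <stdio.h>

/*
 * Unpack an x86 segment descriptor: limit 15:0, base 15:0, base 23:16,
 * access byte, flags + limit 19:16, base 31:24.
 */
static void decode_descriptor(uint64_t d)
{
        unsigned int limit = (d & 0xffff) | ((d >> 32) & 0xf0000);
        unsigned int base = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
        unsigned int access = (d >> 40) & 0xff;
        unsigned int flags = (d >> 52) & 0xf;

        printf("base=%#x limit=%#x access=%#x G=%u D=%u L=%u\n",
               base, limit, access, (flags >> 3) & 1, (flags >> 2) & 1,
               (flags >> 1) & 1);
}

int main(void)
{
        decode_descriptor(0x00af9a000000ffffULL);  /* __KERNEL_CS: L=1, D=0 */
        decode_descriptor(0x00cf92000000ffffULL);  /* __KERNEL_DS: G=1, D=1 */

        return 0;
}

Selector 0x10 pushed before the lret picks that __KERNEL_CS entry, so execution resumes at lret_target as 64-bit code.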
diff --git a/arch/x86/cpu/cpu.c b/arch/x86/cpu/cpu.c
index 623e3af..2e25253 100644
--- a/arch/x86/cpu/cpu.c
+++ b/arch/x86/cpu/cpu.c
@@ -18,7 +18,10 @@
 #include <common.h>
 #include <command.h>
+#include <errno.h>
+#include <malloc.h>
 #include <asm/control_regs.h>
+#include <asm/cpu.h>
 #include <asm/processor.h>
 #include <asm/processor-flags.h>
 #include <asm/interrupt.h>
 
@@ -240,3 +243,144 @@ int icache_status(void)
 {
 	return 1;
 }
+
+void cpu_enable_paging_pae(ulong cr3)
+{
+	__asm__ __volatile__(
+		/* Load the page table address */
+		"movl	%0, %%cr3\n"
+		/* Enable pae */
+		"movl	%%cr4, %%eax\n"
+		"orl	$0x00000020, %%eax\n"
+		"movl	%%eax, %%cr4\n"
+		/* Enable paging */
+		"movl	%%cr0, %%eax\n"
+		"orl	$0x80000000, %%eax\n"
+		"movl	%%eax, %%cr0\n"
+		:
+		: "r" (cr3)
+		: "eax");
+}
+
+void cpu_disable_paging_pae(void)
+{
+	/* Turn off paging */
+	__asm__ __volatile__ (
+		/* Disable paging */
+		"movl	%%cr0, %%eax\n"
+		"andl	$0x7fffffff, %%eax\n"
+		"movl	%%eax, %%cr0\n"
+		/* Disable pae */
+		"movl	%%cr4, %%eax\n"
+		"andl	$0xffffffdf, %%eax\n"
+		"movl	%%eax, %%cr4\n"
+		:
+		:
+		: "eax");
+}
+
+static bool has_cpuid(void)
+{
+	unsigned long flag;
+
+	asm volatile("pushf\n" \
+		"pop %%eax\n"
+		"mov %%eax, %%ecx\n"	/* ecx = flags */
+		"xor %1, %%eax\n"
+		"push %%eax\n"
+		"popf\n"		/* flags ^= $2 */
+		"pushf\n"
+		"pop %%eax\n"		/* eax = flags */
+		"push %%ecx\n"
+		"popf\n"		/* flags = ecx */
+		"xor %%ecx, %%eax\n"
+		"mov %%eax, %0"
+		: "=r" (flag)
+		: "i" (1 << 21)
+		: "eax", "ecx", "memory");
+
+	return flag != 0;
+}
+
+static bool can_detect_long_mode(void)
+{
+	unsigned long flag;
+
+	asm volatile("mov $0x80000000, %%eax\n"
+		"cpuid\n"
+		"mov %%eax, %0"
+		: "=r" (flag)
+		:
+		: "eax", "ebx", "ecx", "edx", "memory");
+
+	return flag > 0x80000000UL;
+}
+
+static bool has_long_mode(void)
+{
+	unsigned long flag;
+
+	asm volatile("mov $0x80000001, %%eax\n"
+		"cpuid\n"
+		"mov %%edx, %0"
+		: "=r" (flag)
+		:
+		: "eax", "ebx", "ecx", "edx", "memory");
+
+	return flag & (1 << 29) ? true : false;
+}
+
+int cpu_has_64bit(void)
+{
+	return has_cpuid() && can_detect_long_mode() &&
+		has_long_mode();
+}
+
+int print_cpuinfo(void)
+{
+	printf("CPU: %s\n", cpu_has_64bit() ? "x86_64" : "x86");
+
+	return 0;
+}
+
+#define PAGETABLE_SIZE		(6 * 4096)
+
+/**
+ * build_pagetable() - build a flat 4GiB page table structure for 64-bit mode
+ *
+ * @pgtable: Pointer to a 24KiB block of memory
+ */
+static void build_pagetable(uint32_t *pgtable)
+{
+	uint i;
+
+	memset(pgtable, '\0', PAGETABLE_SIZE);
+
+	/* Level 4 needs a single entry */
+	pgtable[0] = (uint32_t)&pgtable[1024] + 7;
+
+	/* Level 3 has one 64-bit entry for each GiB of memory */
+	for (i = 0; i < 4; i++) {
+		pgtable[1024 + i * 2] = (uint32_t)&pgtable[2048] +
+							0x1000 * i + 7;
+	}
+
+	/* Level 2 has 2048 64-bit entries, each representing 2MiB */
+	for (i = 0; i < 2048; i++)
+		pgtable[2048 + i * 2] = 0x183 + (i << 21UL);
+}
+
+int cpu_jump_to_64bit(ulong setup_base, ulong target)
+{
+	uint32_t *pgtable;
+
+	pgtable = memalign(4096, PAGETABLE_SIZE);
+	if (!pgtable)
+		return -ENOMEM;
+
+	build_pagetable(pgtable);
+	cpu_call64((ulong)pgtable, setup_base, target);
+	free(pgtable);
+
+	return -EFAULT;
+}
diff --git a/arch/x86/include/asm/bootm.h b/arch/x86/include/asm/bootm.h
index 033ab79..f6a64ce 100644
--- a/arch/x86/include/asm/bootm.h
+++ b/arch/x86/include/asm/bootm.h
@@ -9,4 +9,20 @@
 
 void bootm_announce_and_cleanup(void);
 
+/**
+ * boot_linux_kernel() - boot a linux kernel
+ *
+ * This boots a kernel image, either 32-bit or 64-bit. It will also work with
+ * a self-extracting kernel, if you set @image_64bit to false.
+ *
+ * @setup_base:	Pointer to the setup.bin information for the kernel
+ * @load_address: Pointer to the start of the kernel image
+ * @image_64bit: true if the image is a raw 64-bit kernel, false if it
+ *		is raw 32-bit or any type of self-extracting kernel
+ *		such as a bzImage.
+ * @return -ve error code. This function does not return if the kernel was
+ *	booted successfully.
+ */
+int boot_linux_kernel(ulong setup_base, ulong load_address, bool image_64bit);
+
 #endif
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
new file mode 100644
index 0000000..6c6774a
--- /dev/null
+++ b/arch/x86/include/asm/cpu.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014 The Chromium OS Authors.
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#ifndef __X86_CPU_H
+#define __X86_CPU_H
+
+/**
+ * cpu_enable_paging_pae() - Enable PAE paging
+ *
+ * @cr3:	Value to set in cr3 (PDPT or PML4T)
+ */
+void cpu_enable_paging_pae(ulong cr3);
+
+/**
+ * cpu_disable_paging_pae() - Disable paging and PAE
+ */
+void cpu_disable_paging_pae(void);
+
+/**
+ * cpu_has_64bit() - Check if the CPU has 64-bit support
+ *
+ * @return 1 if this CPU supports long mode (64-bit), 0 if not
+ */
+int cpu_has_64bit(void);
+
+/**
+ * cpu_call64() - Jump to a 64-bit Linux kernel (internal function)
+ *
+ * The kernel is uncompressed and the 64-bit entry point is expected to be
+ * at @target.
+ *
+ * This function is used internally - see cpu_jump_to_64bit() for a more
+ * useful function.
+ *
+ * @pgtable:	Address of 24KB area containing the page table
+ * @setup_base:	Pointer to the setup.bin information for the kernel
+ * @target:	Pointer to the start of the kernel image
+ */
+void cpu_call64(ulong pgtable, ulong setup_base, ulong target);
+
+/**
+ * cpu_jump_to_64bit() - Jump to a 64-bit Linux kernel
+ *
+ * The kernel is uncompressed and the 64-bit entry point is expected to be
+ * at @target.
+ *
+ * @setup_base:	Pointer to the setup.bin information for the kernel
+ * @target:	Pointer to the start of the kernel image
+ */
+int cpu_jump_to_64bit(ulong setup_base, ulong target);
+
+#endif
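The 24KiB table that build_pagetable() fills in identity-maps the first 4GiB: one level-4 entry points at a level-3 table whose four entries each reference one of four level-2 pages, and the 4 x 512 = 2048 level-2 entries (flags 0x183 = present | writable | 2MiB page | global; the "+ 7" on the upper levels is present | writable | user) each map 2MiB. The arithmetic can be checked with this standalone host-side sketch (my own check, not U-Boot code; it assumes a little-endian build and only verifies the level-2 entries, since the pointer values written into levels 3 and 4 are only meaningful in the real 32-bit environment):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGETABLE_SIZE  (6 * 4096)

/* Same layout as build_pagetable() in arch/x86/cpu/cpu.c above */
static void build_pagetable(uint32_t *pgtable)
{
        unsigned int i;

        memset(pgtable, '\0', PAGETABLE_SIZE);
        pgtable[0] = (uint32_t)(uintptr_t)&pgtable[1024] + 7;
        for (i = 0; i < 4; i++)
                pgtable[1024 + i * 2] = (uint32_t)(uintptr_t)&pgtable[2048] +
                                0x1000 * i + 7;
        for (i = 0; i < 2048; i++)
                pgtable[2048 + i * 2] = 0x183 + (i << 21);
}

int main(void)
{
        uint32_t *pgtable = aligned_alloc(4096, PAGETABLE_SIZE);
        uint64_t *pd;
        unsigned int i;

        if (!pgtable)
                return 1;
        build_pagetable(pgtable);

        /* Level 2: 4 page directories x 512 entries x 2MiB = 4GiB */
        pd = (uint64_t *)&pgtable[2048];
        for (i = 0; i < 2048; i++)
                assert((pd[i] & ~0x1ffULL) == (uint64_t)i << 21);
        printf("2048 entries of 2MiB = %u GiB identity mapped\n",
               2048 * 2 / 1024);
        free(pgtable);

        return 0;
}

cpu_jump_to_64bit() allocates the block with memalign(4096, PAGETABLE_SIZE) because cr3 and the table pointers it contains must be 4KiB-aligned.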
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 0a36e17..6027d59 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -44,12 +44,16 @@
 #define MSR_IA32_PERFCTR0		0x000000c1
 #define MSR_IA32_PERFCTR1		0x000000c2
 #define MSR_FSB_FREQ			0x000000cd
+#define MSR_NHM_PLATFORM_INFO		0x000000ce
 
 #define MSR_NHM_SNB_PKG_CST_CFG_CTL	0x000000e2
 #define NHM_C3_AUTO_DEMOTE		(1UL << 25)
 #define NHM_C1_AUTO_DEMOTE		(1UL << 26)
 #define ATM_LNC_C6_AUTO_DEMOTE		(1UL << 25)
+#define SNB_C1_AUTO_UNDEMOTE		(1UL << 27)
+#define SNB_C3_AUTO_UNDEMOTE		(1UL << 28)
 
+#define MSR_PLATFORM_INFO		0x000000ce
 #define MSR_MTRRcap			0x000000fe
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 #define MSR_IA32_BBL_CR_CTL3		0x0000011e
@@ -64,10 +68,20 @@
 
 #define MSR_OFFCORE_RSP_0		0x000001a6
 #define MSR_OFFCORE_RSP_1		0x000001a7
+#define MSR_NHM_TURBO_RATIO_LIMIT	0x000001ad
+#define MSR_IVT_TURBO_RATIO_LIMIT	0x000001ae
+
+#define MSR_LBR_SELECT			0x000001c8
+#define MSR_LBR_TOS			0x000001c9
+#define MSR_LBR_NHM_FROM		0x00000680
+#define MSR_LBR_NHM_TO			0x000006c0
+#define MSR_LBR_CORE_FROM		0x00000040
+#define MSR_LBR_CORE_TO			0x00000060
 
 #define MSR_IA32_PEBS_ENABLE		0x000003f1
 #define MSR_IA32_DS_AREA		0x00000600
 #define MSR_IA32_PERF_CAPABILITIES	0x00000345
+#define MSR_PEBS_LD_LAT_THRESHOLD	0x000003f6
 
 #define MSR_MTRRfix64K_00000		0x00000250
 #define MSR_MTRRfix16K_80000		0x00000258
@@ -91,7 +105,8 @@
 #define MSR_IA32_LASTINTTOIP		0x000001de
 
 /* DEBUGCTLMSR bits (others vary by model): */
-#define DEBUGCTLMSR_LBR			(1UL << 0)
+#define DEBUGCTLMSR_LBR			(1UL << 0) /* last branch recording */
+/* single-step on branches */
 #define DEBUGCTLMSR_BTF			(1UL << 1)
 #define DEBUGCTLMSR_TR			(1UL << 6)
 #define DEBUGCTLMSR_BTS			(1UL << 7)
@@ -100,11 +115,50 @@
 #define DEBUGCTLMSR_BTS_OFF_USR		(1UL << 10)
 #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI	(1UL << 11)
 
+#define MSR_IA32_POWER_CTL		0x000001fc
+
 #define MSR_IA32_MC0_CTL		0x00000400
 #define MSR_IA32_MC0_STATUS		0x00000401
 #define MSR_IA32_MC0_ADDR		0x00000402
 #define MSR_IA32_MC0_MISC		0x00000403
 
+/* C-state Residency Counters */
+#define MSR_PKG_C3_RESIDENCY		0x000003f8
+#define MSR_PKG_C6_RESIDENCY		0x000003f9
+#define MSR_PKG_C7_RESIDENCY		0x000003fa
+#define MSR_CORE_C3_RESIDENCY		0x000003fc
+#define MSR_CORE_C6_RESIDENCY		0x000003fd
+#define MSR_CORE_C7_RESIDENCY		0x000003fe
+#define MSR_PKG_C2_RESIDENCY		0x0000060d
+#define MSR_PKG_C8_RESIDENCY		0x00000630
+#define MSR_PKG_C9_RESIDENCY		0x00000631
+#define MSR_PKG_C10_RESIDENCY		0x00000632
+
+/* Run Time Average Power Limiting (RAPL) Interface */
+
+#define MSR_RAPL_POWER_UNIT		0x00000606
+
+#define MSR_PKG_POWER_LIMIT		0x00000610
+#define MSR_PKG_ENERGY_STATUS		0x00000611
+#define MSR_PKG_PERF_STATUS		0x00000613
+#define MSR_PKG_POWER_INFO		0x00000614
+
+#define MSR_DRAM_POWER_LIMIT		0x00000618
+#define MSR_DRAM_ENERGY_STATUS		0x00000619
+#define MSR_DRAM_PERF_STATUS		0x0000061b
+#define MSR_DRAM_POWER_INFO		0x0000061c
+
+#define MSR_PP0_POWER_LIMIT		0x00000638
+#define MSR_PP0_ENERGY_STATUS		0x00000639
+#define MSR_PP0_POLICY			0x0000063a
+#define MSR_PP0_PERF_STATUS		0x0000063b
+
+#define MSR_PP1_POWER_LIMIT		0x00000640
+#define MSR_PP1_ENERGY_STATUS		0x00000641
+#define MSR_PP1_POLICY			0x00000642
+
+#define MSR_CORE_C1_RES			0x00000660
+
 #define MSR_AMD64_MC0_MASK		0xc0010044
 
 #define MSR_IA32_MCx_CTL(x)		(MSR_IA32_MC0_CTL + 4*(x))
@@ -123,18 +177,31 @@
 #define MSR_P6_EVNTSEL0			0x00000186
 #define MSR_P6_EVNTSEL1			0x00000187
 
+#define MSR_KNC_PERFCTR0		0x00000020
+#define MSR_KNC_PERFCTR1		0x00000021
+#define MSR_KNC_EVNTSEL0		0x00000028
+#define MSR_KNC_EVNTSEL1		0x00000029
+
+/* Alternative perfctr range with full access. */
+#define MSR_IA32_PMC0			0x000004c1
+
 /* AMD64 MSRs. Not complete. See the architecture manual
  * for a more complete list. */
 
 #define MSR_AMD64_PATCH_LEVEL		0x0000008b
+#define MSR_AMD64_TSC_RATIO		0xc0000104
 #define MSR_AMD64_NB_CFG		0xc001001f
 #define MSR_AMD64_PATCH_LOADER		0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
 #define MSR_AMD64_OSVW_STATUS		0xc0010141
+#define MSR_AMD64_LS_CFG		0xc0011020
 #define MSR_AMD64_DC_CFG		0xc0011022
+#define MSR_AMD64_BU_CFG2		0xc001102a
 #define MSR_AMD64_IBSFETCHCTL		0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD		0xc0011031
 #define MSR_AMD64_IBSFETCHPHYSAD	0xc0011032
+#define MSR_AMD64_IBSFETCH_REG_COUNT	3
+#define MSR_AMD64_IBSFETCH_REG_MASK	((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
 #define MSR_AMD64_IBSOPCTL		0xc0011033
 #define MSR_AMD64_IBSOPRIP		0xc0011034
 #define MSR_AMD64_IBSOPDATA		0xc0011035
@@ -142,12 +209,21 @@
 #define MSR_AMD64_IBSOPDATA3		0xc0011037
 #define MSR_AMD64_IBSDCLINAD		0xc0011038
 #define MSR_AMD64_IBSDCPHYSAD		0xc0011039
+#define MSR_AMD64_IBSOP_REG_COUNT	7
+#define MSR_AMD64_IBSOP_REG_MASK	((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
 #define MSR_AMD64_IBSCTL		0xc001103a
 #define MSR_AMD64_IBSBRTARGET		0xc001103b
+#define MSR_AMD64_IBS_REG_COUNT_MAX	8 /* includes MSR_AMD64_IBSBRTARGET */
+
+/* Fam 16h MSRs */
+#define MSR_F16H_L2I_PERF_CTL		0xc0010230
+#define MSR_F16H_L2I_PERF_CTR		0xc0010231
 
 /* Fam 15h MSRs */
 #define MSR_F15H_PERF_CTL		0xc0010200
 #define MSR_F15H_PERF_CTR		0xc0010201
+#define MSR_F15H_NB_PERF_CTL		0xc0010240
+#define MSR_F15H_NB_PERF_CTR		0xc0010241
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE	0xc0010058
@@ -226,7 +302,9 @@
 #define MSR_IA32_PLATFORM_ID		0x00000017
 #define MSR_IA32_EBL_CR_POWERON		0x0000002a
 #define MSR_EBC_FREQUENCY_ID		0x0000002c
+#define MSR_SMI_COUNT			0x00000034
 #define MSR_IA32_FEATURE_CONTROL	0x0000003a
+#define MSR_IA32_TSC_ADJUST		0x0000003b
 
 #define FEATURE_CONTROL_LOCKED				(1<<0)
 #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX	(1<<1)
@@ -237,11 +315,16 @@
 #define MSR_IA32_APICBASE_ENABLE	(1<<11)
 #define MSR_IA32_APICBASE_BASE		(0xfffff<<12)
 
+#define MSR_IA32_TSCDEADLINE		0x000006e0
+
 #define MSR_IA32_UCODE_WRITE		0x00000079
 #define MSR_IA32_UCODE_REV		0x0000008b
 
 #define MSR_IA32_PERF_STATUS		0x00000198
 #define MSR_IA32_PERF_CTL		0x00000199
+#define MSR_AMD_PSTATE_DEF_BASE		0xc0010064
+#define MSR_AMD_PERF_STATUS		0xc0010063
+#define MSR_AMD_PERF_CTL		0xc0010062
 
 #define MSR_IA32_MPERF			0x000000e7
 #define MSR_IA32_APERF			0x000000e8
@@ -267,6 +350,9 @@
 #define MSR_IA32_TEMPERATURE_TARGET	0x000001a2
 
 #define MSR_IA32_ENERGY_PERF_BIAS	0x000001b0
+#define ENERGY_PERF_BIAS_PERFORMANCE	0
+#define ENERGY_PERF_BIAS_NORMAL		6
+#define ENERGY_PERF_BIAS_POWERSAVE	15
 
 #define MSR_IA32_PACKAGE_THERM_STATUS	0x000001b1
 
@@ -320,6 +406,8 @@
 #define MSR_IA32_MISC_ENABLE_TURBO_DISABLE	(1ULL << 38)
 #define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE	(1ULL << 39)
 
+#define MSR_IA32_TSC_DEADLINE		0x000006E0
+
 /* P4/Xeon+ specific */
 #define MSR_IA32_MCG_EAX		0x00000180
 #define MSR_IA32_MCG_EBX		0x00000181
@@ -446,7 +534,23 @@
 #define MSR_IA32_VMX_VMCS_ENUM		0x0000048a
 #define MSR_IA32_VMX_PROCBASED_CTLS2	0x0000048b
 #define MSR_IA32_VMX_EPT_VPID_CAP	0x0000048c
-
+#define MSR_IA32_VMX_TRUE_PINBASED_CTLS		0x0000048d
+#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS	0x0000048e
+#define MSR_IA32_VMX_TRUE_EXIT_CTLS		0x0000048f
+#define MSR_IA32_VMX_TRUE_ENTRY_CTLS		0x00000490
+#define MSR_IA32_VMX_VMFUNC			0x00000491
+
+/* VMX_BASIC bits and bitmasks */
+#define VMX_BASIC_VMCS_SIZE_SHIFT	32
+#define VMX_BASIC_64			0x0001000000000000LLU
+#define VMX_BASIC_MEM_TYPE_SHIFT	50
+#define VMX_BASIC_MEM_TYPE_MASK		0x003c000000000000LLU
+#define VMX_BASIC_MEM_TYPE_WB		6LLU
+#define VMX_BASIC_INOUT			0x0040000000000000LLU
+
+/* MSR_IA32_VMX_MISC bits */
+#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS	(1ULL << 29)
+#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE	0x1F
 
 /* AMD-V MSRs */
 #define MSR_VM_CR			0xc0010114
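The block of MSR constants above is a sync of Linux's msr-index.h; the values are simply register addresses for the rdmsr/wrmsr instructions. For reference, reading one of them from 32-bit code reduces to the following minimal helper (a local sketch, not the accessor U-Boot itself provides in asm/msr.h):

#include <stdint.h>

/* Read the 64-bit MSR at address 'msr', e.g. MSR_PLATFORM_INFO (0xce) above */
static inline uint64_t rdmsr64(uint32_t msr)
{
        uint32_t lo, hi;

        asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));

        return ((uint64_t)hi << 32) | lo;
}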
diff --git a/arch/x86/include/asm/zimage.h b/arch/x86/include/asm/zimage.h
index 0f36662..8e7dd42 100644
--- a/arch/x86/include/asm/zimage.h
+++ b/arch/x86/include/asm/zimage.h
@@ -35,10 +35,8 @@
 unsigned install_e820_map(unsigned max_entries, struct e820entry *);
 
 struct boot_params *load_zimage(char *image, unsigned long kernel_size,
-			void **load_address);
+				ulong *load_addressp);
 int setup_zimage(struct boot_params *setup_base, char *cmd_line, int auto_boot,
 		 unsigned long initrd_addr, unsigned long initrd_size);
 
-void boot_zimage(void *setup_base, void *load_address);
-
 #endif
diff --git a/arch/x86/lib/bootm.c b/arch/x86/lib/bootm.c
index 4c5c7f5..86030cf 100644
--- a/arch/x86/lib/bootm.c
+++ b/arch/x86/lib/bootm.c
@@ -10,10 +10,12 @@
 #include <common.h>
 #include <command.h>
+#include <errno.h>
 #include <fdt_support.h>
 #include <image.h>
 #include <u-boot/zlib.h>
 #include <asm/bootparam.h>
+#include <asm/cpu.h>
 #include <asm/byteorder.h>
 #include <asm/zimage.h>
 
 #ifdef CONFIG_SYS_COREBOOT
@@ -109,17 +111,17 @@
 	}
 
 	if (is_zimage) {
-		void *load_address;
+		ulong load_address;
 		char *base_ptr;
 
 		base_ptr = (char *)load_zimage(data, len, &load_address);
-		images->os.load = (ulong)load_address;
+		images->os.load = load_address;
 		cmd_line_dest = base_ptr + COMMAND_LINE_OFFSET;
 		images->ep = (ulong)base_ptr;
 	} else if (images->ep) {
 		cmd_line_dest = (void *)images->ep + COMMAND_LINE_OFFSET;
 	} else {
-		printf("## Kernel loading failed (no setup) ...\n");
+		printf("## Kernel loading failed (missing x86 kernel setup) ...\n");
 		goto error;
 	}
 
@@ -139,16 +141,50 @@ error:
 	return 1;
 }
 
+int boot_linux_kernel(ulong setup_base, ulong load_address, bool image_64bit)
+{
+	bootm_announce_and_cleanup();
+
+#ifdef CONFIG_SYS_COREBOOT
+	timestamp_add_now(TS_U_BOOT_START_KERNEL);
+#endif
+	if (image_64bit) {
+		if (!cpu_has_64bit()) {
+			puts("Cannot boot 64-bit kernel on 32-bit machine\n");
+			return -EFAULT;
+		}
+		return cpu_jump_to_64bit(setup_base, load_address);
+	} else {
+		/*
+		 * Set %ebx, %ebp, and %edi to 0, %esi to point to the
+		 * boot_params structure, and then jump to the kernel. We
+		 * assume that %cs is 0x10, 4GB flat, and read/execute, and
+		 * the data segments are 0x18, 4GB flat, and read/write.
+		 * U-boot is setting them up that way for itself in
+		 * arch/i386/cpu/cpu.c.
+		 */
+		__asm__ __volatile__ (
+		"movl $0, %%ebp\n"
+		"cli\n"
+		"jmp *%[kernel_entry]\n"
+		:: [kernel_entry]"a"(load_address),
+		   [boot_params] "S"(setup_base),
+		   "b"(0), "D"(0)
+		);
+	}
+
+	/* We can't get to here */
+	return -EFAULT;
+}
+
 /* Subcommand: GO */
 static int boot_jump_linux(bootm_headers_t *images)
 {
 	debug("## Transferring control to Linux (at address %08lx, kernel %08lx) ...\n",
 	      images->ep, images->os.load);
 
-	boot_zimage((struct boot_params *)images->ep, (void *)images->os.load);
-	/* does not return */
-
-	return 1;
+	return boot_linux_kernel(images->ep, images->os.load,
+				 images->os.arch == IH_ARCH_X86_64);
 }
 
 int do_bootm_linux(int flag, int argc, char * const argv[],
@@ -161,10 +197,8 @@ int do_bootm_linux(int flag, int argc, char * const argv[],
 	if (flag & BOOTM_STATE_OS_PREP)
 		return boot_prep_linux(images);
 
-	if (flag & BOOTM_STATE_OS_GO) {
-		boot_jump_linux(images);
-		return 0;
-	}
+	if (flag & BOOTM_STATE_OS_GO)
+		return boot_jump_linux(images);
 
 	return boot_jump_linux(images);
 }
diff --git a/arch/x86/lib/physmem.c b/arch/x86/lib/physmem.c
index b57b2c3..c3c709e 100644
--- a/arch/x86/lib/physmem.c
+++ b/arch/x86/lib/physmem.c
@@ -10,6 +10,7 @@
 #include <common.h>
 #include <physmem.h>
+#include <asm/cpu.h>
 #include <linux/compiler.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -112,41 +113,13 @@ static void x86_phys_enter_paging(void)
 		x86_phys_map_page(page_addr, page_addr, 0);
 	}
 
-	/* Turn on paging */
-	__asm__ __volatile__(
-		/* Load the page table address */
-		"movl	%0, %%cr3\n\t"
-		/* Enable pae */
-		"movl	%%cr4, %%eax\n\t"
-		"orl	$0x00000020, %%eax\n\t"
-		"movl	%%eax, %%cr4\n\t"
-		/* Enable paging */
-		"movl	%%cr0, %%eax\n\t"
-		"orl	$0x80000000, %%eax\n\t"
-		"movl	%%eax, %%cr0\n\t"
-		:
-		: "r" (pdpt)
-		: "eax"
-	);
+	cpu_enable_paging_pae((ulong)pdpt);
 }
 
 /* Disable paging and PAE mode. */
 static void x86_phys_exit_paging(void)
 {
-	/* Turn off paging */
-	__asm__ __volatile__ (
-		/* Disable paging */
-		"movl	%%cr0, %%eax\n\t"
-		"andl	$0x7fffffff, %%eax\n\t"
-		"movl	%%eax, %%cr0\n\t"
-		/* Disable pae */
-		"movl	%%cr4, %%eax\n\t"
-		"andl	$0xffffffdf, %%eax\n\t"
-		"movl	%%eax, %%cr4\n\t"
-		:
-		:
-		: "eax"
-	);
+	cpu_disable_paging_pae();
 }
 
 /*
diff --git a/arch/x86/lib/zimage.c b/arch/x86/lib/zimage.c
index b190283..566b048 100644
--- a/arch/x86/lib/zimage.c
+++ b/arch/x86/lib/zimage.c
@@ -103,7 +103,7 @@ static int get_boot_protocol(struct setup_header *hdr)
 }
 
 struct boot_params *load_zimage(char *image, unsigned long kernel_size,
-				void **load_address)
+				ulong *load_addressp)
 {
 	struct boot_params *setup_base;
 	int setup_size;
@@ -155,9 +155,9 @@ struct boot_params *load_zimage(char *image, unsigned long kernel_size,
 
 	/* Determine load address */
 	if (big_image)
-		*load_address = (void *)BZIMAGE_LOAD_ADDR;
+		*load_addressp = BZIMAGE_LOAD_ADDR;
 	else
-		*load_address = (void *)ZIMAGE_LOAD_ADDR;
+		*load_addressp = ZIMAGE_LOAD_ADDR;
 
 	printf("Building boot_params at 0x%8.8lx\n", (ulong)setup_base);
 	memset(setup_base, 0, sizeof(*setup_base));
@@ -204,10 +204,10 @@ struct boot_params *load_zimage(char *image, unsigned long kernel_size,
 		return 0;
 	}
 
-	printf("Loading %s at address %p (%ld bytes)\n",
-	       big_image ? "bzImage" : "zImage", *load_address, kernel_size);
+	printf("Loading %s at address %lx (%ld bytes)\n",
+	       big_image ? "bzImage" : "zImage", *load_addressp, kernel_size);
 
-	memmove(*load_address, image + setup_size, kernel_size);
+	memmove((void *)*load_addressp, image + setup_size, kernel_size);
 
 	return setup_base;
 }
@@ -261,30 +261,6 @@ int setup_zimage(struct boot_params *setup_base, char *cmd_line, int auto_boot,
 	return 0;
 }
 
-void boot_zimage(void *setup_base, void *load_address)
-{
-	bootm_announce_and_cleanup();
-
-#ifdef CONFIG_SYS_COREBOOT
-	timestamp_add_now(TS_U_BOOT_START_KERNEL);
-#endif
-	/*
-	 * Set %ebx, %ebp, and %edi to 0, %esi to point to the boot_params
-	 * structure, and then jump to the kernel. We assume that %cs is
-	 * 0x10, 4GB flat, and read/execute, and the data segments are 0x18,
-	 * 4GB flat, and read/write. U-boot is setting them up that way for
-	 * itself in arch/i386/cpu/cpu.c.
-	 */
-	__asm__ __volatile__ (
-	"movl $0, %%ebp\n"
-	"cli\n"
-	"jmp *%[kernel_entry]\n"
-	:: [kernel_entry]"a"(load_address),
-	   [boot_params] "S"(setup_base),
-	   "b"(0), "D"(0)
-	);
-}
-
 void setup_pcat_compatibility(void)
 	__attribute__((weak, alias("__setup_pcat_compatibility")));
 
@@ -296,7 +272,7 @@ int do_zboot(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[])
 {
 	struct boot_params *base_ptr;
 	void *bzImage_addr = NULL;
-	void *load_address;
+	ulong load_address;
 	char *s;
 	ulong bzImage_size = 0;
 	ulong initrd_addr = 0;
@@ -331,20 +307,17 @@ int do_zboot(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[])
 
 	base_ptr = load_zimage(bzImage_addr, bzImage_size, &load_address);
 	if (!base_ptr) {
-		printf("## Kernel loading failed ...\n");
+		puts("## Kernel loading failed ...\n");
 		return -1;
 	}
 	if (setup_zimage(base_ptr, (char *)base_ptr + COMMAND_LINE_OFFSET, 0,
 			 initrd_addr, initrd_size)) {
-		printf("Setting up boot parameters failed ...\n");
+		puts("Setting up boot parameters failed ...\n");
 		return -1;
 	}
 
 	/* we assume that the kernel is in place */
-	boot_zimage(base_ptr, load_address);
-	/* does not return */
-
-	return -1;
+	return boot_linux_kernel((ulong)base_ptr, load_address, false);
 }
 
 U_BOOT_CMD(
diff --git a/common/bootm.c b/common/bootm.c
index 81e3261..6b3ea8c 100644
--- a/common/bootm.c
+++ b/common/bootm.c
@@ -167,7 +167,8 @@ static int bootm_find_os(cmd_tbl_t *cmdtp, int flag, int argc,
 	}
 
 	/* If we have a valid setup.bin, we will use that for entry (x86) */
-	if (images.os.arch == IH_ARCH_I386) {
+	if (images.os.arch == IH_ARCH_I386 ||
+	    images.os.arch == IH_ARCH_X86_64) {
 		ulong len;
 
 		ret = boot_get_setup(&images, IH_ARCH_I386, &images.ep, &len);
diff --git a/common/image-fit.c b/common/image-fit.c
index a272ea2..4ffc5aa 100644
--- a/common/image-fit.c
+++ b/common/image-fit.c
@@ -1114,7 +1114,8 @@ int fit_image_check_arch(const void *fit, int noffset, uint8_t arch)
 	if (fit_image_get_arch(fit, noffset, &image_arch))
 		return 0;
 
-	return (arch == image_arch);
+	return (arch == image_arch) ||
+		(arch == IH_ARCH_I386 && image_arch == IH_ARCH_X86_64);
 }
 
 /**
diff --git a/common/image.c b/common/image.c
index eb92e63..b75a5ce 100644
--- a/common/image.c
+++ b/common/image.c
@@ -85,6 +85,7 @@ static const table_entry_t uimage_arch[] = {
 	{ IH_ARCH_SANDBOX,	"sandbox",	"Sandbox",	},
 	{ IH_ARCH_ARM64,	"arm64",	"AArch64",	},
 	{ IH_ARCH_ARC,		"arc",		"ARC",		},
+	{ IH_ARCH_X86_64,	"x86_64",	"AMD x86_64",	},
 	{ -1,			"",		"",		},
 };
 
diff --git a/include/configs/coreboot.h b/include/configs/coreboot.h
index 4b90dc2..fef267f 100644
--- a/include/configs/coreboot.h
+++ b/include/configs/coreboot.h
@@ -26,6 +26,7 @@
 #define CONFIG_PHYSMEM
 #define CONFIG_SYS_EARLY_PCI_INIT
 #define CONFIG_DISPLAY_BOARDINFO_LATE
+#define CONFIG_DISPLAY_CPUINFO
 
 #define CONFIG_DM
 #define CONFIG_CMD_DM
@@ -48,6 +49,7 @@
 #define CONFIG_FIT
 #undef CONFIG_ZLIB
 #undef CONFIG_GZIP
+#define CONFIG_SYS_BOOTM_LEN	(16 << 20)
 
 /*-----------------------------------------------------------------------
  * Watchdog Configuration
@@ -221,7 +223,7 @@
 #define CONFIG_SYS_MEMTEST_START	0x00100000
 #define CONFIG_SYS_MEMTEST_END		0x01000000
 
-#define CONFIG_SYS_LOAD_ADDR		0x02000000
+#define CONFIG_SYS_LOAD_ADDR		0x20000000
 
 /*-----------------------------------------------------------------------
  * SDRAM Configuration
diff --git a/include/image.h b/include/image.h
index a13a302..07e9aed 100644
--- a/include/image.h
+++ b/include/image.h
@@ -173,6 +173,7 @@ struct lmb;
 #define IH_ARCH_OPENRISC	21	/* OpenRISC 1000 */
 #define IH_ARCH_ARM64		22	/* ARM64 */
 #define IH_ARCH_ARC		23	/* Synopsys DesignWare ARC */
+#define IH_ARCH_X86_64		24	/* AMD x86_64, Intel and Via */
 
 /*
  * Image Types
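With the image.h and coreboot.h pieces in place, the whole path is driven by the image architecture: boot_jump_linux() passes images->os.arch == IH_ARCH_X86_64 into boot_linux_kernel(), which either jumps straight to a 32-bit entry point or builds the page table and calls cpu_call64() via cpu_jump_to_64bit(). A caller outside bootm would use the new entry point in roughly the same way (sketch only; setup_base and entry are placeholder values):

#include <common.h>
#include <errno.h>
#include <asm/bootm.h>
#include <asm/cpu.h>

static int start_kernel(ulong setup_base, ulong entry, bool is_64bit)
{
        if (is_64bit && !cpu_has_64bit())
                return -EFAULT; /* this CPU cannot enter long mode */

        /* Only returns if the kernel could not be started */
        return boot_linux_kernel(setup_base, entry, is_64bit);
}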