path: root/arch/x86/cpu/i386
Diffstat (limited to 'arch/x86/cpu/i386')
-rw-r--r--  arch/x86/cpu/i386/Makefile    |   9
-rw-r--r--  arch/x86/cpu/i386/call64.S    |  96
-rw-r--r--  arch/x86/cpu/i386/cpu.c       | 598
-rw-r--r--  arch/x86/cpu/i386/interrupt.c | 615
-rw-r--r--  arch/x86/cpu/i386/setjmp.S    |  61
5 files changed, 1379 insertions, 0 deletions
diff --git a/arch/x86/cpu/i386/Makefile b/arch/x86/cpu/i386/Makefile
new file mode 100644
index 0000000..0c47252
--- /dev/null
+++ b/arch/x86/cpu/i386/Makefile
@@ -0,0 +1,9 @@
+#
+# (C) Copyright 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+obj-y += call64.o
+obj-y += cpu.o
+obj-y += interrupt.o
+obj-y += setjmp.o
diff --git a/arch/x86/cpu/i386/call64.S b/arch/x86/cpu/i386/call64.S
new file mode 100644
index 0000000..970c461
--- /dev/null
+++ b/arch/x86/cpu/i386/call64.S
@@ -0,0 +1,96 @@
+/*
+ * (C) Copyright 2014 Google, Inc
+ * Copyright (C) 1991, 1992, 1993 Linus Torvalds
+ *
+ * Parts of this copied from Linux arch/x86/boot/compressed/head_64.S
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <asm/global_data.h>
+#include <asm/msr-index.h>
+#include <asm/processor-flags.h>
+
+.code32
+.globl cpu_call64
+cpu_call64:
+ /*
+ * cpu_call64(ulong pgtable, ulong setup_base, ulong target)
+ *
+ * eax - pgtable
+ * edx - setup_base
+ * ecx - target
+ */
+ cli
+ push %ecx /* arg2 = target */
+ push %edx /* arg1 = setup_base */
+ mov %eax, %ebx
+
+ /* Load new GDT with the 64bit segments using 32bit descriptor */
+ leal gdt, %eax
+ movl %eax, gdt+2
+ lgdt gdt
+
+ /* Enable PAE mode */
+ movl $(X86_CR4_PAE), %eax
+ movl %eax, %cr4
+
+ /* Enable the boot page tables */
+ leal (%ebx), %eax
+ movl %eax, %cr3
+
+ /* Enable Long mode in EFER (Extended Feature Enable Register) */
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_LME, %eax
+ wrmsr
+
+ /* After gdt is loaded */
+ xorl %eax, %eax
+ lldt %ax
+ movl $0x20, %eax
+ ltr %ax
+
+ /*
+ * Setup for the jump to 64bit mode
+ *
+ * When the jump is performed we will be in long mode but
+ * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
+ * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
+ * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
+ * We place all of the values on our mini stack so lret can be
+ * used to perform that far jump. See the gdt below.
+ */
+ pop %esi /* setup_base */
+
+ pushl $0x10
+ leal lret_target, %eax
+ pushl %eax
+
+ /* Enter paged protected Mode, activating Long Mode */
+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
+ movl %eax, %cr0
+
+ /* Jump from 32bit compatibility mode into 64bit mode. */
+ lret
+
+code64:
+lret_target:
+ pop %eax /* target */
+ mov %eax, %eax /* Clear bits 63:32 */
+ jmp *%eax /* Jump to the 64-bit target */
+
+ .data
+ .align 16
+ .globl gdt64
+gdt64:
+gdt:
+ .word gdt_end - gdt - 1
+ .long gdt /* Fixed up by code above */
+ .word 0
+ .quad 0x0000000000000000 /* NULL descriptor */
+ .quad 0x00af9a000000ffff /* __KERNEL_CS */
+ .quad 0x00cf92000000ffff /* __KERNEL_DS */
+ .quad 0x0080890000000000 /* TS descriptor */
+ .quad 0x0000000000000000 /* TS continued */
+gdt_end:
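
The descriptor quads in the gdt above use the same bit packing as the GDT_ENTRY() macro added in cpu.c below. A minimal host-side sketch of that encoding, assuming only standard C (the macro body is copied from the diff; the flags value 0xa09a is inferred from the descriptor layout, not named in the source):

#include <stdint.h>
#include <stdio.h>

/* Same field packing as the GDT_ENTRY() macro in cpu.c below */
#define GDT_ENTRY(flags, base, limit) \
	((((base) & 0xff000000ULL) << (56 - 24)) | \
	 (((flags) & 0x0000f0ffULL) << 40) | \
	 (((limit) & 0x000f0000ULL) << (48 - 16)) | \
	 (((base) & 0x00ffffffULL) << 16) | \
	 (((limit) & 0x0000ffffULL)))

int main(void)
{
	/*
	 * flags 0xa09a: access byte 0x9a (present, ring 0, code,
	 * execute/read) plus the G and L bits, so the 4KiB-granular
	 * limit covers 4GiB and the segment runs 64-bit code.
	 */
	uint64_t cs64 = GDT_ENTRY(0xa09a, 0, 0xfffff);

	printf("%#018llx\n", (unsigned long long)cs64);
	/* prints 0x00af9a000000ffff, the __KERNEL_CS quad above */
	return 0;
}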
diff --git a/arch/x86/cpu/i386/cpu.c b/arch/x86/cpu/i386/cpu.c
new file mode 100644
index 0000000..aabdc94
--- /dev/null
+++ b/arch/x86/cpu/i386/cpu.c
@@ -0,0 +1,598 @@
+/*
+ * (C) Copyright 2008-2011
+ * Graeme Russ, <graeme.russ@gmail.com>
+ *
+ * (C) Copyright 2002
+ * Daniel Engström, Omicron Ceti AB, <daniel@omicron.se>
+ *
+ * (C) Copyright 2002
+ * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
+ * Marius Groeger <mgroeger@sysgo.de>
+ *
+ * (C) Copyright 2002
+ * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
+ * Alex Zuepke <azu@sysgo.de>
+ *
+ * Part of this file is adapted from coreboot
+ * src/arch/x86/lib/cpu.c
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <asm/control_regs.h>
+#include <asm/cpu.h>
+#include <asm/mp.h>
+#include <asm/msr.h>
+#include <asm/mtrr.h>
+#include <asm/processor-flags.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/*
+ * Constructor for a conventional segment GDT (or LDT) entry
+ * This is a macro so it can be used in initialisers
+ */
+#define GDT_ENTRY(flags, base, limit) \
+ ((((base) & 0xff000000ULL) << (56-24)) | \
+ (((flags) & 0x0000f0ffULL) << 40) | \
+ (((limit) & 0x000f0000ULL) << (48-16)) | \
+ (((base) & 0x00ffffffULL) << 16) | \
+ (((limit) & 0x0000ffffULL)))
+
+struct gdt_ptr {
+ u16 len;
+ u32 ptr;
+} __packed;
+
+struct cpu_device_id {
+ unsigned vendor;
+ unsigned device;
+};
+
+struct cpuinfo_x86 {
+ uint8_t x86; /* CPU family */
+ uint8_t x86_vendor; /* CPU vendor */
+ uint8_t x86_model;
+ uint8_t x86_mask;
+};
+
+/*
+ * List of cpu vendor strings along with their normalized
+ * id values.
+ */
+static const struct {
+ int vendor;
+ const char *name;
+} x86_vendors[] = {
+ { X86_VENDOR_INTEL, "GenuineIntel", },
+ { X86_VENDOR_CYRIX, "CyrixInstead", },
+ { X86_VENDOR_AMD, "AuthenticAMD", },
+ { X86_VENDOR_UMC, "UMC UMC UMC ", },
+ { X86_VENDOR_NEXGEN, "NexGenDriven", },
+ { X86_VENDOR_CENTAUR, "CentaurHauls", },
+ { X86_VENDOR_RISE, "RiseRiseRise", },
+ { X86_VENDOR_TRANSMETA, "GenuineTMx86", },
+ { X86_VENDOR_TRANSMETA, "TransmetaCPU", },
+ { X86_VENDOR_NSC, "Geode by NSC", },
+ { X86_VENDOR_SIS, "SiS SiS SiS ", },
+};
+
+static void load_ds(u32 segment)
+{
+ asm volatile("movl %0, %%ds" : : "r" (segment * X86_GDT_ENTRY_SIZE));
+}
+
+static void load_es(u32 segment)
+{
+ asm volatile("movl %0, %%es" : : "r" (segment * X86_GDT_ENTRY_SIZE));
+}
+
+static void load_fs(u32 segment)
+{
+ asm volatile("movl %0, %%fs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
+}
+
+static void load_gs(u32 segment)
+{
+ asm volatile("movl %0, %%gs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
+}
+
+static void load_ss(u32 segment)
+{
+ asm volatile("movl %0, %%ss" : : "r" (segment * X86_GDT_ENTRY_SIZE));
+}
+
+static void load_gdt(const u64 *boot_gdt, u16 num_entries)
+{
+ struct gdt_ptr gdt;
+
+ gdt.len = (num_entries * X86_GDT_ENTRY_SIZE) - 1;
+ gdt.ptr = (ulong)boot_gdt;
+
+ asm volatile("lgdtl %0\n" : : "m" (gdt));
+}
+
+void arch_setup_gd(gd_t *new_gd)
+{
+ u64 *gdt_addr;
+
+ gdt_addr = new_gd->arch.gdt;
+
+ /*
+ * CS: code, read/execute, 4 GB, base 0
+ *
+ * Some OSes (like VxWorks) require GDT entry 1 to be the 32-bit CS
+ */
+ gdt_addr[X86_GDT_ENTRY_UNUSED] = GDT_ENTRY(0xc09b, 0, 0xfffff);
+ gdt_addr[X86_GDT_ENTRY_32BIT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff);
+
+ /* DS: data, read/write, 4 GB, base 0 */
+ gdt_addr[X86_GDT_ENTRY_32BIT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff);
+
+ /* FS: data, read/write, 4 GB, base (Global Data Pointer) */
+ new_gd->arch.gd_addr = new_gd;
+ gdt_addr[X86_GDT_ENTRY_32BIT_FS] = GDT_ENTRY(0xc093,
+ (ulong)&new_gd->arch.gd_addr, 0xfffff);
+
+ /* 16-bit CS: code, read/execute, 64 kB, base 0 */
+ gdt_addr[X86_GDT_ENTRY_16BIT_CS] = GDT_ENTRY(0x009b, 0, 0x0ffff);
+
+ /* 16-bit DS: data, read/write, 64 kB, base 0 */
+ gdt_addr[X86_GDT_ENTRY_16BIT_DS] = GDT_ENTRY(0x0093, 0, 0x0ffff);
+
+ gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_CS] = GDT_ENTRY(0x809b, 0, 0xfffff);
+ gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_DS] = GDT_ENTRY(0x8093, 0, 0xfffff);
+
+ load_gdt(gdt_addr, X86_GDT_NUM_ENTRIES);
+ load_ds(X86_GDT_ENTRY_32BIT_DS);
+ load_es(X86_GDT_ENTRY_32BIT_DS);
+ load_gs(X86_GDT_ENTRY_32BIT_DS);
+ load_ss(X86_GDT_ENTRY_32BIT_DS);
+ load_fs(X86_GDT_ENTRY_32BIT_FS);
+}
+
+#ifdef CONFIG_HAVE_FSP
+/*
+ * Setup FSP execution environment GDT
+ *
+ * Per the Intel FSP external architecture specification, before calling any
+ * FSP APIs we need to make sure the system is in flat 32-bit mode and that
+ * both the code and data selectors have a full 4GB access range. Here we
+ * reuse the GDT from arch/x86/cpu/start16.S and reload the segment registers.
+ */
+void setup_fsp_gdt(void)
+{
+ load_gdt((const u64 *)(gdt_rom + CONFIG_RESET_SEG_START), 4);
+ load_ds(X86_GDT_ENTRY_32BIT_DS);
+ load_ss(X86_GDT_ENTRY_32BIT_DS);
+ load_es(X86_GDT_ENTRY_32BIT_DS);
+ load_fs(X86_GDT_ENTRY_32BIT_DS);
+ load_gs(X86_GDT_ENTRY_32BIT_DS);
+}
+#endif
+
+/*
+ * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
+ * by the fact that they preserve the flags across the division of 5/2.
+ * PII and PPro exhibit this behavior too, but they have cpuid available.
+ */
+
+/*
+ * Perform the Cyrix 5/2 test. A Cyrix won't change
+ * the flags, while other 486 chips will.
+ */
+static inline int test_cyrix_52div(void)
+{
+ unsigned int test;
+
+ __asm__ __volatile__(
+ "sahf\n\t" /* clear flags (%eax = 0x0005) */
+ "div %b2\n\t" /* divide 5 by 2 */
+ "lahf" /* store flags into %ah */
+ : "=a" (test)
+ : "0" (5), "q" (2)
+ : "cc");
+
+ /* AH is 0x02 on Cyrix after the divide */
+ return (unsigned char) (test >> 8) == 0x02;
+}
+
+/*
+ * Detect a NexGen CPU running without BIOS hypercode new enough
+ * to have CPUID. (Thanks to Herbert Oppmann)
+ */
+static int deep_magic_nexgen_probe(void)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ " movw $0x5555, %%ax\n"
+ " xorw %%dx,%%dx\n"
+ " movw $2, %%cx\n"
+ " divw %%cx\n"
+ " movl $0, %%eax\n"
+ " jnz 1f\n"
+ " movl $1, %%eax\n"
+ "1:\n"
+ : "=a" (ret) : : "cx", "dx");
+ return ret;
+}
+
+static bool has_cpuid(void)
+{
+ return flag_is_changeable_p(X86_EFLAGS_ID);
+}
+
+static bool has_mtrr(void)
+{
+ return cpuid_edx(0x00000001) & (1 << 12) ? true : false;
+}
+
+static int build_vendor_name(char *vendor_name)
+{
+ struct cpuid_result result;
+ result = cpuid(0x00000000);
+ unsigned int *name_as_ints = (unsigned int *)vendor_name;
+
+ name_as_ints[0] = result.ebx;
+ name_as_ints[1] = result.edx;
+ name_as_ints[2] = result.ecx;
+
+ return result.eax;
+}
+
+static void identify_cpu(struct cpu_device_id *cpu)
+{
+ char vendor_name[16];
+ int i;
+
+ vendor_name[0] = '\0'; /* Unset */
+ cpu->device = 0; /* fix gcc 4.4.4 warning */
+
+ /* Find the id and vendor_name */
+ if (!has_cpuid()) {
+ /* It's a 486 if we can modify the AC flag */
+ if (flag_is_changeable_p(X86_EFLAGS_AC))
+ cpu->device = 0x00000400; /* 486 */
+ else
+ cpu->device = 0x00000300; /* 386 */
+ if ((cpu->device == 0x00000400) && test_cyrix_52div()) {
+ memcpy(vendor_name, "CyrixInstead", 13);
+ /* If we ever care we can enable cpuid here */
+ }
+ /* Detect NexGen with old hypercode */
+ else if (deep_magic_nexgen_probe())
+ memcpy(vendor_name, "NexGenDriven", 13);
+ }
+ if (has_cpuid()) {
+ int cpuid_level;
+
+ cpuid_level = build_vendor_name(vendor_name);
+ vendor_name[12] = '\0';
+
+ /* Intel-defined flags: level 0x00000001 */
+ if (cpuid_level >= 0x00000001) {
+ cpu->device = cpuid_eax(0x00000001);
+ } else {
+ /* Having CPUID level 0 only is unheard of */
+ cpu->device = 0x00000400;
+ }
+ }
+ cpu->vendor = X86_VENDOR_UNKNOWN;
+ for (i = 0; i < ARRAY_SIZE(x86_vendors); i++) {
+ if (memcmp(vendor_name, x86_vendors[i].name, 12) == 0) {
+ cpu->vendor = x86_vendors[i].vendor;
+ break;
+ }
+ }
+}
+
+static inline void get_fms(struct cpuinfo_x86 *c, uint32_t tfms)
+{
+ c->x86 = (tfms >> 8) & 0xf;
+ c->x86_model = (tfms >> 4) & 0xf;
+ c->x86_mask = tfms & 0xf;
+ if (c->x86 == 0xf)
+ c->x86 += (tfms >> 20) & 0xff;
+ if (c->x86 >= 0x6)
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+}
+
+u32 cpu_get_family_model(void)
+{
+ return gd->arch.x86_device & 0x0fff0ff0;
+}
+
+u32 cpu_get_stepping(void)
+{
+ return gd->arch.x86_mask;
+}
+
+int x86_cpu_init_f(void)
+{
+ const u32 em_rst = ~X86_CR0_EM;
+ const u32 mp_ne_set = X86_CR0_MP | X86_CR0_NE;
+
+ if (ll_boot_init()) {
+ /* initialize FPU, reset EM, set MP and NE */
+ asm ("fninit\n" \
+ "movl %%cr0, %%eax\n" \
+ "andl %0, %%eax\n" \
+ "orl %1, %%eax\n" \
+ "movl %%eax, %%cr0\n" \
+ : : "i" (em_rst), "i" (mp_ne_set) : "eax");
+ }
+
+ /* identify CPU via cpuid and store the decoded info into gd->arch */
+ if (has_cpuid()) {
+ struct cpu_device_id cpu;
+ struct cpuinfo_x86 c;
+
+ identify_cpu(&cpu);
+ get_fms(&c, cpu.device);
+ gd->arch.x86 = c.x86;
+ gd->arch.x86_vendor = cpu.vendor;
+ gd->arch.x86_model = c.x86_model;
+ gd->arch.x86_mask = c.x86_mask;
+ gd->arch.x86_device = cpu.device;
+
+ gd->arch.has_mtrr = has_mtrr();
+ }
+ /* Don't allow PCI region 3 to use memory in the 2-4GB memory hole */
+ gd->pci_ram_top = 0x80000000U;
+
+ /* Configure fixed range MTRRs for some legacy regions */
+ if (gd->arch.has_mtrr) {
+ u64 mtrr_cap;
+
+ mtrr_cap = native_read_msr(MTRR_CAP_MSR);
+ if (mtrr_cap & MTRR_CAP_FIX) {
+ /* Mark the VGA RAM area as uncacheable */
+ native_write_msr(MTRR_FIX_16K_A0000_MSR,
+ MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE),
+ MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE));
+
+ /*
+ * Mark the PCI ROM area as cacheable to improve ROM
+ * execution performance.
+ */
+ native_write_msr(MTRR_FIX_4K_C0000_MSR,
+ MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
+ MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
+ native_write_msr(MTRR_FIX_4K_C8000_MSR,
+ MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
+ MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
+ native_write_msr(MTRR_FIX_4K_D0000_MSR,
+ MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
+ MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
+ native_write_msr(MTRR_FIX_4K_D8000_MSR,
+ MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
+ MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
+
+ /* Enable the fixed range MTRRs */
+ msr_setbits_64(MTRR_DEF_TYPE_MSR, MTRR_DEF_TYPE_FIX_EN);
+ }
+ }
+
+#ifdef CONFIG_I8254_TIMER
+ /* Set up the i8254 timer if required */
+ i8254_init();
+#endif
+
+ return 0;
+}
+
+void x86_enable_caches(void)
+{
+ unsigned long cr0;
+
+ cr0 = read_cr0();
+ cr0 &= ~(X86_CR0_NW | X86_CR0_CD);
+ write_cr0(cr0);
+ wbinvd();
+}
+void enable_caches(void) __attribute__((weak, alias("x86_enable_caches")));
+
+void x86_disable_caches(void)
+{
+ unsigned long cr0;
+
+ cr0 = read_cr0();
+ cr0 |= X86_CR0_NW | X86_CR0_CD;
+ wbinvd();
+ write_cr0(cr0);
+ wbinvd();
+}
+void disable_caches(void) __attribute__((weak, alias("x86_disable_caches")));
+
+int dcache_status(void)
+{
+ return !(read_cr0() & X86_CR0_CD);
+}
+
+void cpu_enable_paging_pae(ulong cr3)
+{
+ __asm__ __volatile__(
+ /* Load the page table address */
+ "movl %0, %%cr3\n"
+ /* Enable pae */
+ "movl %%cr4, %%eax\n"
+ "orl $0x00000020, %%eax\n"
+ "movl %%eax, %%cr4\n"
+ /* Enable paging */
+ "movl %%cr0, %%eax\n"
+ "orl $0x80000000, %%eax\n"
+ "movl %%eax, %%cr0\n"
+ :
+ : "r" (cr3)
+ : "eax");
+}
+
+void cpu_disable_paging_pae(void)
+{
+ /* Turn off paging */
+ __asm__ __volatile__ (
+ /* Disable paging */
+ "movl %%cr0, %%eax\n"
+ "andl $0x7fffffff, %%eax\n"
+ "movl %%eax, %%cr0\n"
+ /* Disable pae */
+ "movl %%cr4, %%eax\n"
+ "andl $0xffffffdf, %%eax\n"
+ "movl %%eax, %%cr4\n"
+ :
+ :
+ : "eax");
+}
+
+static bool can_detect_long_mode(void)
+{
+ return cpuid_eax(0x80000000) > 0x80000000UL;
+}
+
+static bool has_long_mode(void)
+{
+ return cpuid_edx(0x80000001) & (1 << 29) ? true : false;
+}
+
+int cpu_has_64bit(void)
+{
+ return has_cpuid() && can_detect_long_mode() &&
+ has_long_mode();
+}
+
+#define PAGETABLE_SIZE (6 * 4096)
+
+/**
+ * build_pagetable() - build a flat 4GiB page table structure for 64-bit mode
+ *
+ * @pgtable: Pointer to a 24KiB block of memory
+ */
+static void build_pagetable(uint32_t *pgtable)
+{
+ uint i;
+
+ memset(pgtable, '\0', PAGETABLE_SIZE);
+
+ /* Level 4 needs a single entry */
+ pgtable[0] = (ulong)&pgtable[1024] + 7;
+
+ /* Level 3 has one 64-bit entry for each GiB of memory */
+ for (i = 0; i < 4; i++)
+ pgtable[1024 + i * 2] = (ulong)&pgtable[2048] + 0x1000 * i + 7;
+
+ /* Level 2 has 2048 64-bit entries, each representing 2MiB */
+ for (i = 0; i < 2048; i++)
+ pgtable[2048 + i * 2] = 0x183 + (i << 21UL);
+}
+
+int cpu_jump_to_64bit(ulong setup_base, ulong target)
+{
+ uint32_t *pgtable;
+
+ pgtable = memalign(4096, PAGETABLE_SIZE);
+ if (!pgtable)
+ return -ENOMEM;
+
+ build_pagetable(pgtable);
+ cpu_call64((ulong)pgtable, setup_base, target);
+ free(pgtable);
+
+ return -EFAULT;
+}
+
+/*
+ * Jump from SPL to U-Boot
+ *
+ * This function is work-in-progress with many issues to resolve.
+ *
+ * It works by setting up several regions:
+ * ptr - a place to put the code that jumps into 64-bit mode
+ * gdt - a place to put the global descriptor table
+ * pgtable - a place to put the page tables
+ *
+ * The cpu_call64() code is copied from ROM and then manually patched so that
+ * it has the correct GDT address in RAM. U-Boot is copied from ROM into
+ * its pre-relocation address. Then we jump to the cpu_call64() code in RAM,
+ * which changes to 64-bit mode and starts U-Boot.
+ */
+int cpu_jump_to_64bit_uboot(ulong target)
+{
+ typedef void (*func_t)(ulong pgtable, ulong setup_base, ulong target);
+ uint32_t *pgtable;
+ func_t func;
+
+ /* TODO(sjg@chromium.org): Find a better place for this */
+ pgtable = (uint32_t *)0x1000000;
+ if (!pgtable)
+ return -ENOMEM;
+
+ build_pagetable(pgtable);
+
+ /* TODO(sjg@chromium.org): Find a better place for this */
+ char *ptr = (char *)0x3000000;
+ char *gdt = (char *)0x3100000;
+
+ extern char gdt64[];
+
+ memcpy(ptr, cpu_call64, 0x1000);
+ memcpy(gdt, gdt64, 0x100);
+
+ /*
+ * TODO(sjg@chromium.org): This manually inserts the pointers into
+ * the code. Tidy this up to avoid this.
+ */
+ func = (func_t)ptr;
+ ulong ofs = (ulong)cpu_call64 - (ulong)ptr;
+ *(ulong *)(ptr + 7) = (ulong)gdt;
+ *(ulong *)(ptr + 0xc) = (ulong)gdt + 2;
+ *(ulong *)(ptr + 0x13) = (ulong)gdt;
+ *(ulong *)(ptr + 0x117 - 0xd4) -= ofs;
+
+ /*
+ * Copy U-Boot from ROM
+ * TODO(sjg@chromium.org): Figure out a way to get the text base
+ * correctly here, and in the device-tree binman definition.
+ *
+ * Also consider using FIT so we get the correct image length and
+ * parameters.
+ */
+ memcpy((char *)target, (char *)0xfff00000, 0x100000);
+
+ /* Jump to U-Boot */
+ func((ulong)pgtable, 0, (ulong)target);
+
+ return -EFAULT;
+}
+
+#ifdef CONFIG_SMP
+static int enable_smis(struct udevice *cpu, void *unused)
+{
+ return 0;
+}
+
+static struct mp_flight_record mp_steps[] = {
+ MP_FR_BLOCK_APS(mp_init_cpu, NULL, mp_init_cpu, NULL),
+ /* Wait for APs to finish initialization before proceeding */
+ MP_FR_BLOCK_APS(NULL, NULL, enable_smis, NULL),
+};
+
+int x86_mp_init(void)
+{
+ struct mp_params mp_params;
+
+ mp_params.parallel_microcode_load = 0;
+ mp_params.flight_plan = &mp_steps[0];
+ mp_params.num_records = ARRAY_SIZE(mp_steps);
+ mp_params.microcode_pointer = 0;
+
+ if (mp_init(&mp_params)) {
+ printf("Warning: MP init failure\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+#endif
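
build_pagetable() above identity-maps the first 4GiB with one PML4 entry, four PDPT entries and 2048 2MiB PD entries, each tagged 0x183 (present | writable | page-size | global). A standalone sketch of the resulting translation, assuming that same layout (the helper and flag names are illustrative, not from the source):

#include <stdint.h>
#include <stdio.h>

/* Flag bits making up the 0x183 used by build_pagetable() above */
#define PTE_PRESENT	0x001ULL
#define PTE_RW		0x002ULL
#define PTE_PS		0x080ULL	/* 2MiB page when set in a PD entry */
#define PTE_GLOBAL	0x100ULL

/* Reconstruct the PD entry that maps a given 32-bit address */
static uint64_t pd_entry_for(uint32_t addr)
{
	uint32_t i = addr >> 21;	/* index of the 2MiB region */

	/* Mirrors: pgtable[2048 + i * 2] = 0x183 + (i << 21UL) */
	return ((uint64_t)i << 21) |
		PTE_GLOBAL | PTE_PS | PTE_RW | PTE_PRESENT;
}

int main(void)
{
	/* 16MiB falls in region 8, so this prints 0x1000183 */
	printf("%#llx\n", (unsigned long long)pd_entry_for(0x1000000));
	return 0;
}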
diff --git a/arch/x86/cpu/i386/interrupt.c b/arch/x86/cpu/i386/interrupt.c
new file mode 100644
index 0000000..a058303
--- /dev/null
+++ b/arch/x86/cpu/i386/interrupt.c
@@ -0,0 +1,615 @@
+/*
+ * (C) Copyright 2008-2011
+ * Graeme Russ, <graeme.russ@gmail.com>
+ *
+ * (C) Copyright 2002
+ * Daniel Engström, Omicron Ceti AB, <daniel@omicron.se>
+ *
+ * Portions of this file are derived from the Linux kernel source
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <asm/control_regs.h>
+#include <asm/i8259.h>
+#include <asm/interrupt.h>
+#include <asm/io.h>
+#include <asm/lapic.h>
+#include <asm/processor-flags.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define DECLARE_INTERRUPT(x) \
+ ".globl irq_"#x"\n" \
+ ".hidden irq_"#x"\n" \
+ ".type irq_"#x", @function\n" \
+ "irq_"#x":\n" \
+ "pushl $"#x"\n" \
+ "jmp irq_common_entry\n"
+
+static char *exceptions[] = {
+ "Divide Error",
+ "Debug",
+ "NMI Interrupt",
+ "Breakpoint",
+ "Overflow",
+ "BOUND Range Exceeded",
+ "Invalid Opcode (Undefined Opcode)",
+ "Device Not Avaiable (No Math Coprocessor)",
+ "Double Fault",
+ "Coprocessor Segment Overrun",
+ "Invalid TSS",
+ "Segment Not Present",
+ "Stack Segment Fault",
+ "General Protection",
+ "Page Fault",
+ "Reserved",
+ "x87 FPU Floating-Point Error",
+ "Alignment Check",
+ "Machine Check",
+ "SIMD Floating-Point Exception",
+ "Virtualization Exception",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved"
+};
+
+static void dump_regs(struct irq_regs *regs)
+{
+ unsigned long cs, eip, eflags;
+ unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
+ unsigned long d0, d1, d2, d3, d6, d7;
+ unsigned long sp;
+
+ /*
+ * Some exceptions cause an error code to be saved on the current stack
+ * after the EIP value. We should extract CS/EIP/EFLAGS from different
+ * position on the stack based on the exception number.
+ */
+ switch (regs->irq_id) {
+ case EXC_DF:
+ case EXC_TS:
+ case EXC_NP:
+ case EXC_SS:
+ case EXC_GP:
+ case EXC_PF:
+ case EXC_AC:
+ cs = regs->context.ctx2.xcs;
+ eip = regs->context.ctx2.eip;
+ eflags = regs->context.ctx2.eflags;
+ /* We should fix up the ESP due to error code */
+ regs->esp += 4;
+ break;
+ default:
+ cs = regs->context.ctx1.xcs;
+ eip = regs->context.ctx1.eip;
+ eflags = regs->context.ctx1.eflags;
+ break;
+ }
+
+ printf("EIP: %04x:[<%08lx>] EFLAGS: %08lx\n",
+ (u16)cs, eip, eflags);
+ if (gd->flags & GD_FLG_RELOC)
+ printf("Original EIP :[<%08lx>]\n", eip - gd->reloc_off);
+
+ printf("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+ regs->eax, regs->ebx, regs->ecx, regs->edx);
+ printf("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
+ regs->esi, regs->edi, regs->ebp, regs->esp);
+ printf(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
+ (u16)regs->xds, (u16)regs->xes, (u16)regs->xfs,
+ (u16)regs->xgs, (u16)regs->xss);
+
+ cr0 = read_cr0();
+ cr2 = read_cr2();
+ cr3 = read_cr3();
+ cr4 = read_cr4();
+
+ printf("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
+ cr0, cr2, cr3, cr4);
+
+ d0 = get_debugreg(0);
+ d1 = get_debugreg(1);
+ d2 = get_debugreg(2);
+ d3 = get_debugreg(3);
+
+ printf("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
+ d0, d1, d2, d3);
+
+ d6 = get_debugreg(6);
+ d7 = get_debugreg(7);
+ printf("DR6: %08lx DR7: %08lx\n",
+ d6, d7);
+
+ printf("Stack:\n");
+ sp = regs->esp;
+
+ sp += 64;
+
+ while (sp > (regs->esp - 16)) {
+ if (sp == regs->esp)
+ printf("--->");
+ else
+ printf(" ");
+ printf("0x%8.8lx : 0x%8.8lx\n", sp, (ulong)readl(sp));
+ sp -= 4;
+ }
+}
+
+static void do_exception(struct irq_regs *regs)
+{
+ printf("%s\n", exceptions[regs->irq_id]);
+ dump_regs(regs);
+ hang();
+}
+
+struct idt_entry {
+ u16 base_low;
+ u16 selector;
+ u8 res;
+ u8 access;
+ u16 base_high;
+} __packed;
+
+struct desc_ptr {
+ unsigned short size;
+ unsigned long address;
+} __packed;
+
+struct idt_entry idt[256] __aligned(16);
+
+struct desc_ptr idt_ptr;
+
+static inline void load_idt(const struct desc_ptr *dtr)
+{
+ asm volatile("cs lidt %0" : : "m" (*dtr));
+}
+
+void set_vector(u8 intnum, void *routine)
+{
+ idt[intnum].base_high = (u16)((ulong)(routine) >> 16);
+ idt[intnum].base_low = (u16)((ulong)(routine) & 0xffff);
+}
+
+/*
+ * Ideally these would be defined static to avoid a checkpatch warning, but
+ * the compiler cannot see them in the inline asm and complains that they
+ * aren't defined
+ */
+void irq_0(void);
+void irq_1(void);
+
+int cpu_init_interrupts(void)
+{
+ int i;
+
+ int irq_entry_size = irq_1 - irq_0;
+ void *irq_entry = (void *)irq_0;
+
+ /* Setup the IDT */
+ for (i = 0; i < 256; i++) {
+ idt[i].access = 0x8e;
+ idt[i].res = 0;
+ idt[i].selector = X86_GDT_ENTRY_32BIT_CS * X86_GDT_ENTRY_SIZE;
+ set_vector(i, irq_entry);
+ irq_entry += irq_entry_size;
+ }
+
+ idt_ptr.size = 256 * 8 - 1;
+ idt_ptr.address = (unsigned long) idt;
+
+ load_idt(&idt_ptr);
+
+ return 0;
+}
+
+void *x86_get_idt(void)
+{
+ return &idt_ptr;
+}
+
+void __do_irq(int irq)
+{
+ printf("Unhandled IRQ : %d\n", irq);
+}
+void do_irq(int irq) __attribute__((weak, alias("__do_irq")));
+
+void enable_interrupts(void)
+{
+ asm("sti\n");
+}
+
+int disable_interrupts(void)
+{
+ long flags;
+
+#if CONFIG_IS_ENABLED(X86_64)
+ asm volatile ("pushfq ; popq %0 ; cli\n" : "=g" (flags) : );
+#else
+ asm volatile ("pushfl ; popl %0 ; cli\n" : "=g" (flags) : );
+#endif
+ return flags & X86_EFLAGS_IF;
+}
+
+int interrupt_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ /* Try to set up the interrupt router, but don't require one */
+ ret = uclass_first_device_err(UCLASS_IRQ, &dev);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ /*
+ * When running as an EFI application we are not in control of
+ * interrupts and should leave them alone.
+ */
+#ifndef CONFIG_EFI_APP
+ /* Just in case... */
+ disable_interrupts();
+
+#ifdef CONFIG_I8259_PIC
+ /* Initialize the master/slave i8259 pic */
+ i8259_init();
+#endif
+
+ lapic_setup();
+
+ /* Initialize core interrupt and exception functionality of CPU */
+ cpu_init_interrupts();
+
+ /*
+ * It is now safe to enable interrupts.
+ *
+ * TODO(sjg@chromium.org): But we don't handle these correctly when
+ * booted from EFI.
+ */
+ if (ll_boot_init())
+ enable_interrupts();
+#endif
+
+ return 0;
+}
+
+/* IRQ Low-Level Service Routine */
+void irq_llsr(struct irq_regs *regs)
+{
+ /*
+ * For detailed description of each exception, refer to:
+ * Intel® 64 and IA-32 Architectures Software Developer's Manual
+ * Volume 1: Basic Architecture
+ * Order Number: 253665-029US, November 2008
+ * Table 6-1. Exceptions and Interrupts
+ */
+ if (regs->irq_id < 32) {
+ /* Architecture defined exception */
+ do_exception(regs);
+ } else {
+ /* Hardware or User IRQ */
+ do_irq(regs->irq_id);
+ }
+}
+
+/*
+ * OK - This looks really horrible, but it serves a purpose - It helps create
+ * fully relocatable code.
+ * - The call to irq_llsr will be a relative jump
+ * - The IRQ entries will be guaranteed to be in order
+ * Interrupt entries are now very small (a push and a jump) but they are
+ * now slower (all registers pushed on stack which provides complete
+ * crash dumps in the low level handlers)
+ *
+ * Interrupt Entry Point:
+ * - Interrupt has caused eflags, CS and EIP to be pushed
+ * - Interrupt Vector Handler has pushed orig_eax
+ * - pt_regs.esp needs to be adjusted by 40 bytes:
+ * 12 bytes pushed by CPU (EFLAGS, CS, EIP)
+ * 4 bytes pushed by vector handler (irq_id)
+ * 24 bytes pushed before SP (SS, GS, FS, ES, DS, EAX)
+ * NOTE: Only longs are pushed on/popped off the stack!
+ */
+asm(".globl irq_common_entry\n" \
+ ".hidden irq_common_entry\n" \
+ ".type irq_common_entry, @function\n" \
+ "irq_common_entry:\n" \
+ "cld\n" \
+ "pushl %ss\n" \
+ "pushl %gs\n" \
+ "pushl %fs\n" \
+ "pushl %es\n" \
+ "pushl %ds\n" \
+ "pushl %eax\n" \
+ "movl %esp, %eax\n" \
+ "addl $40, %eax\n" \
+ "pushl %eax\n" \
+ "pushl %ebp\n" \
+ "pushl %edi\n" \
+ "pushl %esi\n" \
+ "pushl %edx\n" \
+ "pushl %ecx\n" \
+ "pushl %ebx\n" \
+ "mov %esp, %eax\n" \
+ "call irq_llsr\n" \
+ "popl %ebx\n" \
+ "popl %ecx\n" \
+ "popl %edx\n" \
+ "popl %esi\n" \
+ "popl %edi\n" \
+ "popl %ebp\n" \
+ "popl %eax\n" \
+ "popl %eax\n" \
+ "popl %ds\n" \
+ "popl %es\n" \
+ "popl %fs\n" \
+ "popl %gs\n" \
+ "popl %ss\n" \
+ "add $4, %esp\n" \
+ "iret\n" \
+ DECLARE_INTERRUPT(0) \
+ DECLARE_INTERRUPT(1) \
+ DECLARE_INTERRUPT(2) \
+ DECLARE_INTERRUPT(3) \
+ DECLARE_INTERRUPT(4) \
+ DECLARE_INTERRUPT(5) \
+ DECLARE_INTERRUPT(6) \
+ DECLARE_INTERRUPT(7) \
+ DECLARE_INTERRUPT(8) \
+ DECLARE_INTERRUPT(9) \
+ DECLARE_INTERRUPT(10) \
+ DECLARE_INTERRUPT(11) \
+ DECLARE_INTERRUPT(12) \
+ DECLARE_INTERRUPT(13) \
+ DECLARE_INTERRUPT(14) \
+ DECLARE_INTERRUPT(15) \
+ DECLARE_INTERRUPT(16) \
+ DECLARE_INTERRUPT(17) \
+ DECLARE_INTERRUPT(18) \
+ DECLARE_INTERRUPT(19) \
+ DECLARE_INTERRUPT(20) \
+ DECLARE_INTERRUPT(21) \
+ DECLARE_INTERRUPT(22) \
+ DECLARE_INTERRUPT(23) \
+ DECLARE_INTERRUPT(24) \
+ DECLARE_INTERRUPT(25) \
+ DECLARE_INTERRUPT(26) \
+ DECLARE_INTERRUPT(27) \
+ DECLARE_INTERRUPT(28) \
+ DECLARE_INTERRUPT(29) \
+ DECLARE_INTERRUPT(30) \
+ DECLARE_INTERRUPT(31) \
+ DECLARE_INTERRUPT(32) \
+ DECLARE_INTERRUPT(33) \
+ DECLARE_INTERRUPT(34) \
+ DECLARE_INTERRUPT(35) \
+ DECLARE_INTERRUPT(36) \
+ DECLARE_INTERRUPT(37) \
+ DECLARE_INTERRUPT(38) \
+ DECLARE_INTERRUPT(39) \
+ DECLARE_INTERRUPT(40) \
+ DECLARE_INTERRUPT(41) \
+ DECLARE_INTERRUPT(42) \
+ DECLARE_INTERRUPT(43) \
+ DECLARE_INTERRUPT(44) \
+ DECLARE_INTERRUPT(45) \
+ DECLARE_INTERRUPT(46) \
+ DECLARE_INTERRUPT(47) \
+ DECLARE_INTERRUPT(48) \
+ DECLARE_INTERRUPT(49) \
+ DECLARE_INTERRUPT(50) \
+ DECLARE_INTERRUPT(51) \
+ DECLARE_INTERRUPT(52) \
+ DECLARE_INTERRUPT(53) \
+ DECLARE_INTERRUPT(54) \
+ DECLARE_INTERRUPT(55) \
+ DECLARE_INTERRUPT(56) \
+ DECLARE_INTERRUPT(57) \
+ DECLARE_INTERRUPT(58) \
+ DECLARE_INTERRUPT(59) \
+ DECLARE_INTERRUPT(60) \
+ DECLARE_INTERRUPT(61) \
+ DECLARE_INTERRUPT(62) \
+ DECLARE_INTERRUPT(63) \
+ DECLARE_INTERRUPT(64) \
+ DECLARE_INTERRUPT(65) \
+ DECLARE_INTERRUPT(66) \
+ DECLARE_INTERRUPT(67) \
+ DECLARE_INTERRUPT(68) \
+ DECLARE_INTERRUPT(69) \
+ DECLARE_INTERRUPT(70) \
+ DECLARE_INTERRUPT(71) \
+ DECLARE_INTERRUPT(72) \
+ DECLARE_INTERRUPT(73) \
+ DECLARE_INTERRUPT(74) \
+ DECLARE_INTERRUPT(75) \
+ DECLARE_INTERRUPT(76) \
+ DECLARE_INTERRUPT(77) \
+ DECLARE_INTERRUPT(78) \
+ DECLARE_INTERRUPT(79) \
+ DECLARE_INTERRUPT(80) \
+ DECLARE_INTERRUPT(81) \
+ DECLARE_INTERRUPT(82) \
+ DECLARE_INTERRUPT(83) \
+ DECLARE_INTERRUPT(84) \
+ DECLARE_INTERRUPT(85) \
+ DECLARE_INTERRUPT(86) \
+ DECLARE_INTERRUPT(87) \
+ DECLARE_INTERRUPT(88) \
+ DECLARE_INTERRUPT(89) \
+ DECLARE_INTERRUPT(90) \
+ DECLARE_INTERRUPT(91) \
+ DECLARE_INTERRUPT(92) \
+ DECLARE_INTERRUPT(93) \
+ DECLARE_INTERRUPT(94) \
+ DECLARE_INTERRUPT(95) \
+ DECLARE_INTERRUPT(96) \
+ DECLARE_INTERRUPT(97) \
+ DECLARE_INTERRUPT(98) \
+ DECLARE_INTERRUPT(99) \
+ DECLARE_INTERRUPT(100) \
+ DECLARE_INTERRUPT(101) \
+ DECLARE_INTERRUPT(102) \
+ DECLARE_INTERRUPT(103) \
+ DECLARE_INTERRUPT(104) \
+ DECLARE_INTERRUPT(105) \
+ DECLARE_INTERRUPT(106) \
+ DECLARE_INTERRUPT(107) \
+ DECLARE_INTERRUPT(108) \
+ DECLARE_INTERRUPT(109) \
+ DECLARE_INTERRUPT(110) \
+ DECLARE_INTERRUPT(111) \
+ DECLARE_INTERRUPT(112) \
+ DECLARE_INTERRUPT(113) \
+ DECLARE_INTERRUPT(114) \
+ DECLARE_INTERRUPT(115) \
+ DECLARE_INTERRUPT(116) \
+ DECLARE_INTERRUPT(117) \
+ DECLARE_INTERRUPT(118) \
+ DECLARE_INTERRUPT(119) \
+ DECLARE_INTERRUPT(120) \
+ DECLARE_INTERRUPT(121) \
+ DECLARE_INTERRUPT(122) \
+ DECLARE_INTERRUPT(123) \
+ DECLARE_INTERRUPT(124) \
+ DECLARE_INTERRUPT(125) \
+ DECLARE_INTERRUPT(126) \
+ DECLARE_INTERRUPT(127) \
+ DECLARE_INTERRUPT(128) \
+ DECLARE_INTERRUPT(129) \
+ DECLARE_INTERRUPT(130) \
+ DECLARE_INTERRUPT(131) \
+ DECLARE_INTERRUPT(132) \
+ DECLARE_INTERRUPT(133) \
+ DECLARE_INTERRUPT(134) \
+ DECLARE_INTERRUPT(135) \
+ DECLARE_INTERRUPT(136) \
+ DECLARE_INTERRUPT(137) \
+ DECLARE_INTERRUPT(138) \
+ DECLARE_INTERRUPT(139) \
+ DECLARE_INTERRUPT(140) \
+ DECLARE_INTERRUPT(141) \
+ DECLARE_INTERRUPT(142) \
+ DECLARE_INTERRUPT(143) \
+ DECLARE_INTERRUPT(144) \
+ DECLARE_INTERRUPT(145) \
+ DECLARE_INTERRUPT(146) \
+ DECLARE_INTERRUPT(147) \
+ DECLARE_INTERRUPT(148) \
+ DECLARE_INTERRUPT(149) \
+ DECLARE_INTERRUPT(150) \
+ DECLARE_INTERRUPT(151) \
+ DECLARE_INTERRUPT(152) \
+ DECLARE_INTERRUPT(153) \
+ DECLARE_INTERRUPT(154) \
+ DECLARE_INTERRUPT(155) \
+ DECLARE_INTERRUPT(156) \
+ DECLARE_INTERRUPT(157) \
+ DECLARE_INTERRUPT(158) \
+ DECLARE_INTERRUPT(159) \
+ DECLARE_INTERRUPT(160) \
+ DECLARE_INTERRUPT(161) \
+ DECLARE_INTERRUPT(162) \
+ DECLARE_INTERRUPT(163) \
+ DECLARE_INTERRUPT(164) \
+ DECLARE_INTERRUPT(165) \
+ DECLARE_INTERRUPT(166) \
+ DECLARE_INTERRUPT(167) \
+ DECLARE_INTERRUPT(168) \
+ DECLARE_INTERRUPT(169) \
+ DECLARE_INTERRUPT(170) \
+ DECLARE_INTERRUPT(171) \
+ DECLARE_INTERRUPT(172) \
+ DECLARE_INTERRUPT(173) \
+ DECLARE_INTERRUPT(174) \
+ DECLARE_INTERRUPT(175) \
+ DECLARE_INTERRUPT(176) \
+ DECLARE_INTERRUPT(177) \
+ DECLARE_INTERRUPT(178) \
+ DECLARE_INTERRUPT(179) \
+ DECLARE_INTERRUPT(180) \
+ DECLARE_INTERRUPT(181) \
+ DECLARE_INTERRUPT(182) \
+ DECLARE_INTERRUPT(183) \
+ DECLARE_INTERRUPT(184) \
+ DECLARE_INTERRUPT(185) \
+ DECLARE_INTERRUPT(186) \
+ DECLARE_INTERRUPT(187) \
+ DECLARE_INTERRUPT(188) \
+ DECLARE_INTERRUPT(189) \
+ DECLARE_INTERRUPT(190) \
+ DECLARE_INTERRUPT(191) \
+ DECLARE_INTERRUPT(192) \
+ DECLARE_INTERRUPT(193) \
+ DECLARE_INTERRUPT(194) \
+ DECLARE_INTERRUPT(195) \
+ DECLARE_INTERRUPT(196) \
+ DECLARE_INTERRUPT(197) \
+ DECLARE_INTERRUPT(198) \
+ DECLARE_INTERRUPT(199) \
+ DECLARE_INTERRUPT(200) \
+ DECLARE_INTERRUPT(201) \
+ DECLARE_INTERRUPT(202) \
+ DECLARE_INTERRUPT(203) \
+ DECLARE_INTERRUPT(204) \
+ DECLARE_INTERRUPT(205) \
+ DECLARE_INTERRUPT(206) \
+ DECLARE_INTERRUPT(207) \
+ DECLARE_INTERRUPT(208) \
+ DECLARE_INTERRUPT(209) \
+ DECLARE_INTERRUPT(210) \
+ DECLARE_INTERRUPT(211) \
+ DECLARE_INTERRUPT(212) \
+ DECLARE_INTERRUPT(213) \
+ DECLARE_INTERRUPT(214) \
+ DECLARE_INTERRUPT(215) \
+ DECLARE_INTERRUPT(216) \
+ DECLARE_INTERRUPT(217) \
+ DECLARE_INTERRUPT(218) \
+ DECLARE_INTERRUPT(219) \
+ DECLARE_INTERRUPT(220) \
+ DECLARE_INTERRUPT(221) \
+ DECLARE_INTERRUPT(222) \
+ DECLARE_INTERRUPT(223) \
+ DECLARE_INTERRUPT(224) \
+ DECLARE_INTERRUPT(225) \
+ DECLARE_INTERRUPT(226) \
+ DECLARE_INTERRUPT(227) \
+ DECLARE_INTERRUPT(228) \
+ DECLARE_INTERRUPT(229) \
+ DECLARE_INTERRUPT(230) \
+ DECLARE_INTERRUPT(231) \
+ DECLARE_INTERRUPT(232) \
+ DECLARE_INTERRUPT(233) \
+ DECLARE_INTERRUPT(234) \
+ DECLARE_INTERRUPT(235) \
+ DECLARE_INTERRUPT(236) \
+ DECLARE_INTERRUPT(237) \
+ DECLARE_INTERRUPT(238) \
+ DECLARE_INTERRUPT(239) \
+ DECLARE_INTERRUPT(240) \
+ DECLARE_INTERRUPT(241) \
+ DECLARE_INTERRUPT(242) \
+ DECLARE_INTERRUPT(243) \
+ DECLARE_INTERRUPT(244) \
+ DECLARE_INTERRUPT(245) \
+ DECLARE_INTERRUPT(246) \
+ DECLARE_INTERRUPT(247) \
+ DECLARE_INTERRUPT(248) \
+ DECLARE_INTERRUPT(249) \
+ DECLARE_INTERRUPT(250) \
+ DECLARE_INTERRUPT(251) \
+ DECLARE_INTERRUPT(252) \
+ DECLARE_INTERRUPT(253) \
+ DECLARE_INTERRUPT(254) \
+ DECLARE_INTERRUPT(255));
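
Each DECLARE_INTERRUPT() stub above pushes its vector number and jumps to irq_common_entry, and cpu_init_interrupts() relies on the stubs being equally sized and emitted in vector order (which is why entries 96 and 97 must appear in sequence). set_vector() then splits each stub's 32-bit address across the base_low/base_high halves of an IDT gate. A minimal sketch of that split, assuming the 32-bit CS is GDT entry 1 (selector 0x08) as the cpu.c comment above indicates; the handler address is made up:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the idt_entry layout in interrupt.c above */
struct idt_entry {
	uint16_t base_low;
	uint16_t selector;
	uint8_t res;
	uint8_t access;
	uint16_t base_high;
} __attribute__((packed));

/* Same address split as set_vector() performs */
static void set_vector_sketch(struct idt_entry *e, uint32_t handler)
{
	e->base_low = (uint16_t)(handler & 0xffff);
	e->base_high = (uint16_t)(handler >> 16);
	e->access = 0x8e;	/* present, DPL 0, 32-bit interrupt gate */
}

int main(void)
{
	struct idt_entry e = { .selector = 0x08 };	/* 32-bit CS */

	set_vector_sketch(&e, 0x00123456);
	printf("low=%#06x high=%#06x\n", e.base_low, e.base_high);
	/* prints low=0x3456 high=0x0012 */
	return 0;
}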
diff --git a/arch/x86/cpu/i386/setjmp.S b/arch/x86/cpu/i386/setjmp.S
new file mode 100644
index 0000000..2ea1c6c
--- /dev/null
+++ b/arch/x86/cpu/i386/setjmp.S
@@ -0,0 +1,61 @@
+/*
+ * Written by H. Peter Anvin <hpa@zytor.com>
+ * Brought in from Linux v4.4 and modified for U-Boot
+ * From Linux arch/um/sys-i386/setjmp.S
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#define _REGPARM
+
+/*
+ * The jmp_buf is assumed to contain the following, in order:
+ * %ebx
+ * %esp
+ * %ebp
+ * %esi
+ * %edi
+ * <return address>
+ */
+
+ .text
+ .align 4
+ .globl setjmp
+ .type setjmp, @function
+setjmp:
+#ifdef _REGPARM
+ movl %eax, %edx
+#else
+ movl 4(%esp), %edx
+#endif
+ popl %ecx /* Return address, and adjust the stack */
+ xorl %eax, %eax /* Return value */
+ movl %ebx, (%edx)
+ movl %esp, 4(%edx) /* Post-return %esp! */
+ pushl %ecx /* Make the call/return stack happy */
+ movl %ebp, 8(%edx)
+ movl %esi, 12(%edx)
+ movl %edi, 16(%edx)
+ movl %ecx, 20(%edx) /* Return address */
+ ret
+
+ /* Provide function size if needed */
+ .size setjmp, .-setjmp
+
+ .align 4
+ .globl longjmp
+ .type longjmp, @function
+longjmp:
+#ifdef _REGPARM
+ xchgl %eax, %edx
+#else
+ movl 4(%esp), %edx /* jmp_ptr address */
+#endif
+ movl (%edx), %ebx
+ movl 4(%edx), %esp
+ movl 8(%edx), %ebp
+ movl 12(%edx), %esi
+ movl 16(%edx), %edi
+ jmp *20(%edx)
+
+ .size longjmp, .-longjmp
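
These routines follow the usual setjmp/longjmp contract: setjmp zeroes %eax for the direct return, and longjmp restores the saved registers and resumes that call with %eax carrying longjmp's second argument (unlike the C library version, a value of 0 is not remapped to 1). A minimal usage sketch, written against the standard header since the semantics match:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf buf;

static void fail(void)
{
	/* Resumes the setjmp() below, which then returns 1 */
	longjmp(buf, 1);
}

int main(void)
{
	if (setjmp(buf) == 0) {
		puts("direct return");
		fail();
	} else {
		puts("resumed via longjmp");
	}
	return 0;
}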