Diffstat (limited to 'arch/x86/include')
-rw-r--r--   arch/x86/include/asm/lapic.h   124
-rw-r--r--   arch/x86/include/asm/post.h      1
2 files changed, 123 insertions, 2 deletions
diff --git a/arch/x86/include/asm/lapic.h b/arch/x86/include/asm/lapic.h
index 948e643..0a7f443 100644
--- a/arch/x86/include/asm/lapic.h
+++ b/arch/x86/include/asm/lapic.h
@@ -14,6 +14,13 @@
 #include <asm/msr.h>
 #include <asm/processor.h>
 
+/* See if I need to initialize the local apic */
+#if CONFIG_SMP || CONFIG_IOAPIC
+# define NEED_LAPIC 1
+#else
+# define NEED_LAPIC 0
+#endif
+
 static inline __attribute__((always_inline))
 	unsigned long lapic_read(unsigned long reg)
 {
@@ -37,8 +44,9 @@ static inline void enable_lapic(void)
 
 	msr = msr_read(LAPIC_BASE_MSR);
 	msr.hi &= 0xffffff00;
-	msr.lo &= 0x000007ff;
-	msr.lo |= LAPIC_DEFAULT_BASE | (1 << 11);
+	msr.lo |= LAPIC_BASE_MSR_ENABLE;
+	msr.lo &= ~LAPIC_BASE_MSR_ADDR_MASK;
+	msr.lo |= LAPIC_DEFAULT_BASE;
 	msr_write(LAPIC_BASE_MSR, msr);
 }
 
@@ -56,4 +64,116 @@ static inline __attribute__((always_inline)) unsigned long lapicid(void)
 	return lapic_read(LAPIC_ID) >> 24;
 }
 
+#if !CONFIG_AP_IN_SIPI_WAIT
+/* If we need to go back to sipi wait, we use the long non-inlined version of
+ * this function in lapic_cpu_init.c
+ */
+static inline __attribute__((always_inline)) void stop_this_cpu(void)
+{
+	/* Called by an AP when it is ready to halt and wait for a new task */
+	for (;;)
+		cpu_hlt();
+}
+#else
+void stop_this_cpu(void);
+#endif
+
+#define xchg(ptr, v)	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
+						    sizeof(*(ptr))))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ * but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+				   int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("xchgb %b0,%1"
+			: "=q" (x)
+			: "m" (*__xg(ptr)), "0" (x)
+			: "memory");
+		break;
+	case 2:
+		__asm__ __volatile__("xchgw %w0,%1"
+			: "=r" (x)
+			: "m" (*__xg(ptr)), "0" (x)
+			: "memory");
+		break;
+	case 4:
+		__asm__ __volatile__("xchgl %0,%1"
+			: "=r" (x)
+			: "m" (*__xg(ptr)), "0" (x)
+			: "memory");
+		break;
+	}
+
+	return x;
+}
+
+static inline void lapic_write_atomic(unsigned long reg, unsigned long v)
+{
+	(void)xchg((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg), v);
+}
+
+
+#ifdef X86_GOOD_APIC
+# define FORCE_READ_AROUND_WRITE 0
+# define lapic_read_around(x)		lapic_read(x)
+# define lapic_write_around(x, y)	lapic_write((x), (y))
+#else
+# define FORCE_READ_AROUND_WRITE 1
+# define lapic_read_around(x)		lapic_read(x)
+# define lapic_write_around(x, y)	lapic_write_atomic((x), (y))
+#endif
+
+static inline int lapic_remote_read(int apicid, int reg, unsigned long *pvalue)
+{
+	int timeout;
+	unsigned long status;
+	int result;
+	lapic_wait_icr_idle();
+	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
+	lapic_write_around(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
+	timeout = 0;
+	do {
+		status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
+	} while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
+
+	result = -1;
+	if (status == LAPIC_ICR_RR_VALID) {
+		*pvalue = lapic_read(LAPIC_RRR);
+		result = 0;
+	}
+	return result;
+}
+
+
+void lapic_setup(void);
+
+#if CONFIG_SMP
+struct device;
+int start_cpu(struct device *cpu);
+#endif /* CONFIG_SMP */
+
+int boot_cpu(void);
+
+/**
+ * struct x86_cpu_priv - Information about a single CPU
+ *
+ * @apic_id:	Advanced Programmable Interrupt Controller Identifier, which is
+ *		just a number representing the CPU core
+ *
+ * TODO: Move this to driver model once lifecycle is understood
+ */
+struct x86_cpu_priv {
+	int apic_id;
+	int start_err;
+};
+
 #endif
diff --git a/arch/x86/include/asm/post.h b/arch/x86/include/asm/post.h
index ce68839..6d2ae5d 100644
--- a/arch/x86/include/asm/post.h
+++ b/arch/x86/include/asm/post.h
@@ -30,6 +30,7 @@
 #define POST_PRE_MRC		0x2e
 #define POST_MRC		0x2f
 #define POST_DRAM		0x2f
+#define POST_LAPIC		0x30
 
 #define POST_RAM_FAILURE	0xea
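
For orientation only, and not part of the patch: a minimal sketch of how the remote-read helper added above might be exercised by code that includes this header. The wrapper name check_remote_apic_id() is a hypothetical illustration; only lapic_remote_read(), LAPIC_ID and the bits-31:24 APIC ID layout used by lapicid() come from lapic.h.

#include <asm/lapic.h>

/*
 * Hypothetical illustration: issue an ICR remote-read of another CPU's
 * LAPIC_ID register and check that the ID reported in bits 31:24 matches
 * the APIC ID we targeted.  Returns 0 on match, negative on error/mismatch.
 */
static int check_remote_apic_id(int apic_id)
{
	unsigned long val;

	if (lapic_remote_read(apic_id, LAPIC_ID, &val))
		return -1;	/* remote read did not complete */

	return ((val >> 24) == (unsigned long)apic_id) ? 0 : -2;
}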