author | Haavard Skinnemoen <haavard.skinnemoen@atmel.com> | 2008-12-17 16:53:07 +0100
committer | Haavard Skinnemoen <haavard.skinnemoen@atmel.com> | 2008-12-17 16:53:07 +0100
commit | cb5473205206c7f14cbb1e747f28ec75b48826e2 (patch)
tree | 8f4808d60917100b18a10b05230f7638a0a9bbcc /cpu/mpc86xx
parent | baf449fc5ff96f071bb0e3789fd3265f6d4fd9a0 (diff)
parent | 92c78a3bbcb2ce508b4bf1c4a1e0940406a024bb (diff)
Merge branch 'fixes' into cleanups
Conflicts:
board/atmel/atngw100/atngw100.c
board/atmel/atstk1000/atstk1000.c
cpu/at32ap/at32ap700x/gpio.c
include/asm-avr32/arch-at32ap700x/clk.h
include/configs/atngw100.h
include/configs/atstk1002.h
include/configs/atstk1003.h
include/configs/atstk1004.h
include/configs/atstk1006.h
include/configs/favr-32-ezkit.h
include/configs/hammerhead.h
include/configs/mimc200.h
Diffstat (limited to 'cpu/mpc86xx')
-rw-r--r-- | cpu/mpc86xx/Makefile | 4
-rw-r--r-- | cpu/mpc86xx/cache.S | 14
-rw-r--r-- | cpu/mpc86xx/cpu.c | 56
-rw-r--r-- | cpu/mpc86xx/cpu_init.c | 100
-rw-r--r-- | cpu/mpc86xx/ddr-8641.c | 4
-rw-r--r-- | cpu/mpc86xx/fdt.c | 25
-rw-r--r-- | cpu/mpc86xx/interrupts.c | 133
-rw-r--r-- | cpu/mpc86xx/mp.c | 68
-rw-r--r-- | cpu/mpc86xx/mp.h | 7
-rw-r--r-- | cpu/mpc86xx/release.S | 169
-rw-r--r-- | cpu/mpc86xx/speed.c | 2
-rw-r--r-- | cpu/mpc86xx/start.S | 283
12 files changed, 482 insertions, 383 deletions
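
The largest functional change this merge brings into cpu/mpc86xx is the new multi-CPU support (mp.c, mp.h, release.S, plus hooks in cpu_init.c and fdt.c below): core 0 copies a 4 KiB boot page for the second core to a fixed spot near the top of RAM and reserves that page from both U-Boot's LMB allocator and the kernel's device tree. The standalone C sketch below restates only the placement rule for illustration; the pick_boot_page() helper name and the stdio output are not part of the diff, while the 0xfffff000 / 0xfff00000 constants and the 1 MiB offset are taken from it.

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the placement rule used in cpu/mpc86xx/mp.c and fdt.c:
 * if the board has (almost) 4 GiB of RAM, pin the boot page at
 * 4 GiB - 1 MiB (0xfff00000); otherwise place it 1 MiB below the
 * end of RAM.  Only 4 KiB of that region is actually copied and
 * reserved (lmb_reserve()/fdt_add_mem_rsv() both pass 4096).
 */
static uint32_t pick_boot_page(uint64_t ram_size)
{
        if (ram_size > 0xfffff000ULL)
                return 0xfff00000U;
        return (uint32_t)(ram_size - (1024 * 1024));
}

int main(void)
{
        /* Example: a 512 MiB board vs. a 4 GiB board */
        printf("512 MiB -> boot page at 0x%08x\n",
               (unsigned)pick_boot_page(512ULL << 20));
        printf("4 GiB   -> boot page at 0x%08x\n",
               (unsigned)pick_boot_page(4ULL << 30));
        return 0;
}
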
diff --git a/cpu/mpc86xx/Makefile b/cpu/mpc86xx/Makefile index a9767ad..34a9755 100644 --- a/cpu/mpc86xx/Makefile +++ b/cpu/mpc86xx/Makefile @@ -31,6 +31,10 @@ LIB = $(obj)lib$(CPU).a START = start.o SOBJS = cache.o +ifneq ($(CONFIG_NUM_CPUS),1) +COBJS-y += mp.o +SOBJS += release.o +endif COBJS-y += traps.o COBJS-y += cpu.o COBJS-y += cpu_init.o diff --git a/cpu/mpc86xx/cache.S b/cpu/mpc86xx/cache.S index 80ff688..0bb058b 100644 --- a/cpu/mpc86xx/cache.S +++ b/cpu/mpc86xx/cache.S @@ -53,7 +53,7 @@ _GLOBAL(invalidate_l1_data_cache) /* * Flush data cache. */ -_GLOBAL(flush_data_cache) +_GLOBAL(flush_dcache) lis r3,0 lis r5,CACHE_LINE_SIZE flush: @@ -279,7 +279,7 @@ _GLOBAL(dcache_enable) mtspr HID0, r5 /* enable + invalidate */ mtspr HID0, r3 /* enable */ sync -#ifdef CFG_L2 +#ifdef CONFIG_SYS_L2 mflr r5 bl l2cache_enable /* uses r3 and r4 */ sync @@ -290,12 +290,12 @@ _GLOBAL(dcache_enable) /* * Disable data cache(s) - L1 and optionally L2 - * Calls flush_data_cache and l2cache_disable_no_flush. + * Calls flush_dcache and l2cache_disable_no_flush. * LR saved in r4 */ _GLOBAL(dcache_disable) mflr r4 /* save link register */ - bl flush_data_cache /* uses r3 and r5 */ + bl flush_dcache /* uses r3 and r5 */ sync mfspr r3, HID0 li r5, HID0_DCFI|HID0_DLOCK @@ -305,7 +305,7 @@ _GLOBAL(dcache_disable) andc r3, r3, r5 /* no enable, no invalidate */ mtspr HID0, r3 sync -#ifdef CFG_L2 +#ifdef CONFIG_SYS_L2 bl l2cache_disable_no_flush /* uses r3 */ #endif mtlr r4 /* restore link register */ @@ -363,11 +363,11 @@ _GLOBAL(l2cache_enable) /* * Disable L2 cache - * Calls flush_data_cache. LR is saved in r4 + * Calls flush_dcache. LR is saved in r4 */ _GLOBAL(l2cache_disable) mflr r4 /* save link register */ - bl flush_data_cache /* uses r3 and r5 */ + bl flush_dcache /* uses r3 and r5 */ sync mtlr r4 /* restore link register */ l2cache_disable_no_flush: /* provide way to disable L2 w/o flushing */ diff --git a/cpu/mpc86xx/cpu.c b/cpu/mpc86xx/cpu.c index ecea5b0..4cace98 100644 --- a/cpu/mpc86xx/cpu.c +++ b/cpu/mpc86xx/cpu.c @@ -28,6 +28,7 @@ #include <asm/cache.h> #include <asm/mmu.h> #include <mpc86xx.h> +#include <tsec.h> #include <asm/fsl_law.h> @@ -40,7 +41,7 @@ checkcpu(void) uint major, minor; uint lcrr; /* local bus clock ratio register */ uint clkdiv; /* clock divider portion of lcrr */ - volatile immap_t *immap = (immap_t *) CFG_IMMR; + volatile immap_t *immap = (immap_t *) CONFIG_SYS_IMMR; volatile ccsr_gur_t *gur = &immap->im_gur; puts("Freescale PowerPC\n"); @@ -99,11 +100,11 @@ checkcpu(void) printf("MPX:%4lu MHz, ", sysinfo.freqSystemBus / 1000000); printf("DDR:%4lu MHz, ", sysinfo.freqSystemBus / 2000000); -#if defined(CFG_LBC_LCRR) - lcrr = CFG_LBC_LCRR; +#if defined(CONFIG_SYS_LBC_LCRR) + lcrr = CONFIG_SYS_LBC_LCRR; #else { - volatile immap_t *immap = (immap_t *) CFG_IMMR; + volatile immap_t *immap = (immap_t *) CONFIG_SYS_IMMR; volatile ccsr_lbc_t *lbc = &immap->im_lbc; lcrr = lbc->lcrr; @@ -160,16 +161,16 @@ do_reset(cmd_tbl_t *cmdtp, int flag, int argc, char *argv[]) { #if !defined(CONFIG_MPC8641HPCN) && !defined(CONFIG_MPC8610HPCD) -#ifdef CFG_RESET_ADDRESS - ulong addr = CFG_RESET_ADDRESS; +#ifdef CONFIG_SYS_RESET_ADDRESS + ulong addr = CONFIG_SYS_RESET_ADDRESS; #else /* - * note: when CFG_MONITOR_BASE points to a RAM address, - * CFG_MONITOR_BASE - sizeof (ulong) is usually a valid + * note: when CONFIG_SYS_MONITOR_BASE points to a RAM address, + * CONFIG_SYS_MONITOR_BASE - sizeof (ulong) is usually a valid * address. 
Better pick an address known to be invalid on your - * system and assign it to CFG_RESET_ADDRESS. + * system and assign it to CONFIG_SYS_RESET_ADDRESS. */ - ulong addr = CFG_MONITOR_BASE - sizeof(ulong); + ulong addr = CONFIG_SYS_MONITOR_BASE - sizeof(ulong); #endif /* flush and disable I/D cache */ @@ -218,7 +219,7 @@ watchdog_reset(void) /* * This actually feed the hard enabled watchdog. */ - volatile immap_t *immap = (immap_t *)CFG_IMMR; + volatile immap_t *immap = (immap_t *)CONFIG_SYS_IMMR; volatile ccsr_wdt_t *wdt = &immap->im_wdt; volatile ccsr_gur_t *gur = &immap->im_gur; u32 tmp = gur->pordevsr; @@ -236,7 +237,7 @@ watchdog_reset(void) void dma_init(void) { - volatile immap_t *immap = (immap_t *) CFG_IMMR; + volatile immap_t *immap = (immap_t *) CONFIG_SYS_IMMR; volatile ccsr_dma_t *dma = &immap->im_dma; dma->satr0 = 0x00040000; @@ -247,7 +248,7 @@ dma_init(void) uint dma_check(void) { - volatile immap_t *immap = (immap_t *) CFG_IMMR; + volatile immap_t *immap = (immap_t *) CONFIG_SYS_IMMR; volatile ccsr_dma_t *dma = &immap->im_dma; volatile uint status = dma->sr0; @@ -265,7 +266,7 @@ dma_check(void) int dma_xfer(void *dest, uint count, void *src) { - volatile immap_t *immap = (immap_t *) CFG_IMMR; + volatile immap_t *immap = (immap_t *) CONFIG_SYS_IMMR; volatile ccsr_dma_t *dma = &immap->im_dma; dma->dar0 = (uint) dest; @@ -287,7 +288,7 @@ dma_xfer(void *dest, uint count, void *src) */ void mpc86xx_reginfo(void) { - immap_t *immap = (immap_t *)CFG_IMMR; + immap_t *immap = (immap_t *)CONFIG_SYS_IMMR; ccsr_lbc_t *lbc = &immap->im_lbc; print_bats(); @@ -305,28 +306,15 @@ void mpc86xx_reginfo(void) } -#ifdef CONFIG_TSEC_ENET -/* Default initializations for TSEC controllers. To override, - * create a board-specific function called: - * int board_eth_init(bd_t *bis) +/* + * Initializes on-chip ethernet controllers. 
+ * to override, implement board_eth_init() */ - -extern int tsec_initialize(bd_t * bis, int index, char *devname); - int cpu_eth_init(bd_t *bis) { -#if defined(CONFIG_TSEC1) - tsec_initialize(bis, 0, CONFIG_TSEC1_NAME); -#endif -#if defined(CONFIG_TSEC2) - tsec_initialize(bis, 1, CONFIG_TSEC2_NAME); -#endif -#if defined(CONFIG_TSEC3) - tsec_initialize(bis, 2, CONFIG_TSEC3_NAME); -#endif -#if defined(CONFIG_TSEC4) - tsec_initialize(bis, 3, CONFIG_TSEC4_NAME); +#if defined(CONFIG_TSEC_ENET) + tsec_standard_init(bis); #endif + return 0; } -#endif /* CONFIG_TSEC_ENET */ diff --git a/cpu/mpc86xx/cpu_init.c b/cpu/mpc86xx/cpu_init.c index 1fda3fe..a7e6036 100644 --- a/cpu/mpc86xx/cpu_init.c +++ b/cpu/mpc86xx/cpu_init.c @@ -31,6 +31,9 @@ #include <mpc86xx.h> #include <asm/mmu.h> #include <asm/fsl_law.h> +#include "mp.h" + +void setup_bats(void); DECLARE_GLOBAL_DATA_PTR; @@ -43,11 +46,11 @@ DECLARE_GLOBAL_DATA_PTR; void cpu_init_f(void) { - volatile immap_t *immap = (immap_t *)CFG_IMMR; + volatile immap_t *immap = (immap_t *)CONFIG_SYS_IMMR; volatile ccsr_lbc_t *memctl = &immap->im_lbc; /* Pointer is writable since we allocated a register for it */ - gd = (gd_t *) (CFG_INIT_RAM_ADDR + CFG_GBL_DATA_OFFSET); + gd = (gd_t *) (CONFIG_SYS_INIT_RAM_ADDR + CONFIG_SYS_GBL_DATA_OFFSET); /* Clear initial global data */ memset ((void *) gd, 0, sizeof (gd_t)); @@ -56,57 +59,59 @@ void cpu_init_f(void) init_laws(); #endif + setup_bats(); + /* Map banks 0 and 1 to the FLASH banks 0 and 1 at preliminary * addresses - these have to be modified later when FLASH size * has been determined */ -#if defined(CFG_OR0_REMAP) - memctl->or0 = CFG_OR0_REMAP; +#if defined(CONFIG_SYS_OR0_REMAP) + memctl->or0 = CONFIG_SYS_OR0_REMAP; #endif -#if defined(CFG_OR1_REMAP) - memctl->or1 = CFG_OR1_REMAP; +#if defined(CONFIG_SYS_OR1_REMAP) + memctl->or1 = CONFIG_SYS_OR1_REMAP; #endif /* now restrict to preliminary range */ -#if defined(CFG_BR0_PRELIM) && defined(CFG_OR0_PRELIM) - memctl->br0 = CFG_BR0_PRELIM; - memctl->or0 = CFG_OR0_PRELIM; +#if defined(CONFIG_SYS_BR0_PRELIM) && defined(CONFIG_SYS_OR0_PRELIM) + memctl->br0 = CONFIG_SYS_BR0_PRELIM; + memctl->or0 = CONFIG_SYS_OR0_PRELIM; #endif -#if defined(CFG_BR1_PRELIM) && defined(CFG_OR1_PRELIM) - memctl->or1 = CFG_OR1_PRELIM; - memctl->br1 = CFG_BR1_PRELIM; +#if defined(CONFIG_SYS_BR1_PRELIM) && defined(CONFIG_SYS_OR1_PRELIM) + memctl->or1 = CONFIG_SYS_OR1_PRELIM; + memctl->br1 = CONFIG_SYS_BR1_PRELIM; #endif -#if defined(CFG_BR2_PRELIM) && defined(CFG_OR2_PRELIM) - memctl->or2 = CFG_OR2_PRELIM; - memctl->br2 = CFG_BR2_PRELIM; +#if defined(CONFIG_SYS_BR2_PRELIM) && defined(CONFIG_SYS_OR2_PRELIM) + memctl->or2 = CONFIG_SYS_OR2_PRELIM; + memctl->br2 = CONFIG_SYS_BR2_PRELIM; #endif -#if defined(CFG_BR3_PRELIM) && defined(CFG_OR3_PRELIM) - memctl->or3 = CFG_OR3_PRELIM; - memctl->br3 = CFG_BR3_PRELIM; +#if defined(CONFIG_SYS_BR3_PRELIM) && defined(CONFIG_SYS_OR3_PRELIM) + memctl->or3 = CONFIG_SYS_OR3_PRELIM; + memctl->br3 = CONFIG_SYS_BR3_PRELIM; #endif -#if defined(CFG_BR4_PRELIM) && defined(CFG_OR4_PRELIM) - memctl->or4 = CFG_OR4_PRELIM; - memctl->br4 = CFG_BR4_PRELIM; +#if defined(CONFIG_SYS_BR4_PRELIM) && defined(CONFIG_SYS_OR4_PRELIM) + memctl->or4 = CONFIG_SYS_OR4_PRELIM; + memctl->br4 = CONFIG_SYS_BR4_PRELIM; #endif -#if defined(CFG_BR5_PRELIM) && defined(CFG_OR5_PRELIM) - memctl->or5 = CFG_OR5_PRELIM; - memctl->br5 = CFG_BR5_PRELIM; +#if defined(CONFIG_SYS_BR5_PRELIM) && defined(CONFIG_SYS_OR5_PRELIM) + memctl->or5 = CONFIG_SYS_OR5_PRELIM; + memctl->br5 = CONFIG_SYS_BR5_PRELIM; 
#endif -#if defined(CFG_BR6_PRELIM) && defined(CFG_OR6_PRELIM) - memctl->or6 = CFG_OR6_PRELIM; - memctl->br6 = CFG_BR6_PRELIM; +#if defined(CONFIG_SYS_BR6_PRELIM) && defined(CONFIG_SYS_OR6_PRELIM) + memctl->or6 = CONFIG_SYS_OR6_PRELIM; + memctl->br6 = CONFIG_SYS_BR6_PRELIM; #endif -#if defined(CFG_BR7_PRELIM) && defined(CFG_OR7_PRELIM) - memctl->or7 = CFG_OR7_PRELIM; - memctl->br7 = CFG_BR7_PRELIM; +#if defined(CONFIG_SYS_BR7_PRELIM) && defined(CONFIG_SYS_OR7_PRELIM) + memctl->or7 = CONFIG_SYS_OR7_PRELIM; + memctl->br7 = CONFIG_SYS_BR7_PRELIM; #endif /* enable the timebase bit in HID0 */ @@ -121,28 +126,31 @@ void cpu_init_f(void) */ int cpu_init_r(void) { +#if (CONFIG_NUM_CPUS > 1) + setup_mp(); +#endif return 0; } /* Set up BAT registers */ void setup_bats(void) { - write_bat(DBAT0, CFG_DBAT0U, CFG_DBAT0L); - write_bat(IBAT0, CFG_IBAT0U, CFG_IBAT0L); - write_bat(DBAT1, CFG_DBAT1U, CFG_DBAT1L); - write_bat(IBAT1, CFG_IBAT1U, CFG_IBAT1L); - write_bat(DBAT2, CFG_DBAT2U, CFG_DBAT2L); - write_bat(IBAT2, CFG_IBAT2U, CFG_IBAT2L); - write_bat(DBAT3, CFG_DBAT3U, CFG_DBAT3L); - write_bat(IBAT3, CFG_IBAT3U, CFG_IBAT3L); - write_bat(DBAT4, CFG_DBAT4U, CFG_DBAT4L); - write_bat(IBAT4, CFG_IBAT4U, CFG_IBAT4L); - write_bat(DBAT5, CFG_DBAT5U, CFG_DBAT5L); - write_bat(IBAT5, CFG_IBAT5U, CFG_IBAT5L); - write_bat(DBAT6, CFG_DBAT6U, CFG_DBAT6L); - write_bat(IBAT6, CFG_IBAT6U, CFG_IBAT6L); - write_bat(DBAT7, CFG_DBAT7U, CFG_DBAT7L); - write_bat(IBAT7, CFG_IBAT7U, CFG_IBAT7L); + write_bat(DBAT0, CONFIG_SYS_DBAT0U, CONFIG_SYS_DBAT0L); + write_bat(IBAT0, CONFIG_SYS_IBAT0U, CONFIG_SYS_IBAT0L); + write_bat(DBAT1, CONFIG_SYS_DBAT1U, CONFIG_SYS_DBAT1L); + write_bat(IBAT1, CONFIG_SYS_IBAT1U, CONFIG_SYS_IBAT1L); + write_bat(DBAT2, CONFIG_SYS_DBAT2U, CONFIG_SYS_DBAT2L); + write_bat(IBAT2, CONFIG_SYS_IBAT2U, CONFIG_SYS_IBAT2L); + write_bat(DBAT3, CONFIG_SYS_DBAT3U, CONFIG_SYS_DBAT3L); + write_bat(IBAT3, CONFIG_SYS_IBAT3U, CONFIG_SYS_IBAT3L); + write_bat(DBAT4, CONFIG_SYS_DBAT4U, CONFIG_SYS_DBAT4L); + write_bat(IBAT4, CONFIG_SYS_IBAT4U, CONFIG_SYS_IBAT4L); + write_bat(DBAT5, CONFIG_SYS_DBAT5U, CONFIG_SYS_DBAT5L); + write_bat(IBAT5, CONFIG_SYS_IBAT5U, CONFIG_SYS_IBAT5L); + write_bat(DBAT6, CONFIG_SYS_DBAT6U, CONFIG_SYS_DBAT6L); + write_bat(IBAT6, CONFIG_SYS_IBAT6U, CONFIG_SYS_IBAT6L); + write_bat(DBAT7, CONFIG_SYS_DBAT7U, CONFIG_SYS_DBAT7L); + write_bat(IBAT7, CONFIG_SYS_IBAT7U, CONFIG_SYS_IBAT7L); return; } diff --git a/cpu/mpc86xx/ddr-8641.c b/cpu/mpc86xx/ddr-8641.c index f936182..51d0102 100644 --- a/cpu/mpc86xx/ddr-8641.c +++ b/cpu/mpc86xx/ddr-8641.c @@ -22,10 +22,10 @@ void fsl_ddr_set_memctl_regs(const fsl_ddr_cfg_regs_t *regs, switch (ctrl_num) { case 0: - ddr = (void *)CFG_MPC86xx_DDR_ADDR; + ddr = (void *)CONFIG_SYS_MPC86xx_DDR_ADDR; break; case 1: - ddr = (void *)CFG_MPC86xx_DDR2_ADDR; + ddr = (void *)CONFIG_SYS_MPC86xx_DDR2_ADDR; break; default: printf("%s unexpected ctrl_num = %u\n", __FUNCTION__, ctrl_num); diff --git a/cpu/mpc86xx/fdt.c b/cpu/mpc86xx/fdt.c index 12d9052..3adfad9 100644 --- a/cpu/mpc86xx/fdt.c +++ b/cpu/mpc86xx/fdt.c @@ -9,9 +9,17 @@ #include <common.h> #include <libfdt.h> #include <fdt_support.h> +#include "mp.h" + +DECLARE_GLOBAL_DATA_PTR; void ft_cpu_setup(void *blob, bd_t *bd) { +#if (CONFIG_NUM_CPUS > 1) + int off; + u32 bootpg; +#endif + do_fixup_by_prop_u32(blob, "device_type", "cpu", 4, "timebase-frequency", bd->bi_busfreq / 4, 1); do_fixup_by_prop_u32(blob, "device_type", "cpu", 4, @@ -28,8 +36,21 @@ void ft_cpu_setup(void *blob, bd_t *bd) fdt_fixup_ethernet(blob); #endif -#ifdef 
CFG_NS16550 +#ifdef CONFIG_SYS_NS16550 do_fixup_by_compat_u32(blob, "ns16550", - "clock-frequency", CFG_NS16550_CLK, 1); + "clock-frequency", CONFIG_SYS_NS16550_CLK, 1); +#endif + +#if (CONFIG_NUM_CPUS > 1) + /* if we have 4G or more of memory, put the boot page at 4Gb-1M */ + if (gd->ram_size > 0xfffff000) + bootpg = 0xfff00000; + else + bootpg = gd->ram_size - (1024 * 1024); + + /* Reserve the boot page so OSes dont use it */ + off = fdt_add_mem_rsv(blob, bootpg, (u64)4096); + if (off < 0) + printf("%s: %s\n", __FUNCTION__, fdt_strerror(off)); #endif } diff --git a/cpu/mpc86xx/interrupts.c b/cpu/mpc86xx/interrupts.c index f16744b..c78fc72 100644 --- a/cpu/mpc86xx/interrupts.c +++ b/cpu/mpc86xx/interrupts.c @@ -35,78 +35,23 @@ #include <mpc86xx.h> #include <command.h> #include <asm/processor.h> -#include <ppc_asm.tmpl> -#include <watchdog.h> -unsigned long decrementer_count; /* count value for 1e6/HZ microseconds */ -unsigned long timestamp; - - -static __inline__ unsigned long get_msr(void) -{ - unsigned long msr; - - asm volatile ("mfmsr %0":"=r" (msr):); - - return msr; -} - -static __inline__ void set_msr(unsigned long msr) -{ - asm volatile ("mtmsr %0"::"r" (msr)); -} - -static __inline__ unsigned long get_dec(void) -{ - unsigned long val; - - asm volatile ("mfdec %0":"=r" (val):); - - return val; -} - -static __inline__ void set_dec(unsigned long val) -{ - if (val) - asm volatile ("mtdec %0"::"r" (val)); -} - -/* interrupt is not supported yet */ int interrupt_init_cpu(unsigned long *decrementer_count) { - return 0; -} - -int interrupt_init(void) -{ - int ret; - - volatile immap_t *immr = (immap_t *)CFG_IMMR; - immr->im_pic.gcr = MPC86xx_PICGCR_RST; - while (immr->im_pic.gcr & MPC86xx_PICGCR_RST); - immr->im_pic.gcr = MPC86xx_PICGCR_MODE; - - /* call cpu specific function from $(CPU)/interrupts.c */ - ret = interrupt_init_cpu(&decrementer_count); + volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR; + volatile ccsr_pic_t *pic = &immr->im_pic; - if (ret) - return ret; + pic->gcr = MPC86xx_PICGCR_RST; + while (pic->gcr & MPC86xx_PICGCR_RST) + ; + pic->gcr = MPC86xx_PICGCR_MODE; - decrementer_count = get_tbclk() / CFG_HZ; + *decrementer_count = get_tbclk() / CONFIG_SYS_HZ; debug("interrupt init: tbclk() = %d MHz, decrementer_count = %ld\n", (get_tbclk() / 1000000), - decrementer_count); - - set_dec(decrementer_count); - - set_msr(get_msr() | MSR_EE); - - debug("MSR = 0x%08lx, Decrementer reg = 0x%08lx\n", - get_msr(), - get_dec()); + *decrementer_count); #ifdef CONFIG_INTERRUPTS - volatile ccsr_pic_t *pic = &immr->im_pic; pic->iivpr1 = 0x810001; /* 50220 enable mcm interrupts */ debug("iivpr1@%x = %x\n", &pic->iivpr1, pic->iivpr1); @@ -132,25 +77,6 @@ int interrupt_init(void) return 0; } -void enable_interrupts(void) -{ - set_msr(get_msr() | MSR_EE); -} - -/* returns flag if MSR_EE was set before */ -int disable_interrupts(void) -{ - ulong msr = get_msr(); - - set_msr(msr & ~MSR_EE); - return (msr & MSR_EE) != 0; -} - -void increment_timestamp(void) -{ - timestamp++; -} - /* * timer_interrupt - gets called when the decrementer overflows, * with interrupts disabled. 
@@ -161,50 +87,9 @@ void timer_interrupt_cpu(struct pt_regs *regs) /* nothing to do here */ } -void timer_interrupt(struct pt_regs *regs) -{ - /* call cpu specific function from $(CPU)/interrupts.c */ - timer_interrupt_cpu(regs); - - timestamp++; - - /* Restore Decrementer Count */ - set_dec(decrementer_count); - -#if defined(CONFIG_WATCHDOG) || defined (CONFIG_HW_WATCHDOG) - if ((timestamp % (CFG_WATCHDOG_FREQ)) == 0) - WATCHDOG_RESET(); -#endif /* CONFIG_WATCHDOG || CONFIG_HW_WATCHDOG */ - -#ifdef CONFIG_STATUS_LED - status_led_tick(timestamp); -#endif /* CONFIG_STATUS_LED */ - -#ifdef CONFIG_SHOW_ACTIVITY - board_show_activity(timestamp); -#endif /* CONFIG_SHOW_ACTIVITY */ - -} - -void reset_timer(void) -{ - timestamp = 0; -} - -ulong get_timer(ulong base) -{ - return timestamp - base; -} - -void set_timer(ulong t) -{ - timestamp = t; -} - /* * Install and free a interrupt handler. Not implemented yet. */ - void irq_install_handler(int vec, interrupt_handler_t *handler, void *arg) { } @@ -218,8 +103,6 @@ void irq_free_handler(int vec) */ int do_irqinfo(cmd_tbl_t *cmdtp, int flag, int argc, char *argv[]) { - printf("\nInterrupt-unsupported:\n"); - return 0; } diff --git a/cpu/mpc86xx/mp.c b/cpu/mpc86xx/mp.c new file mode 100644 index 0000000..5014401 --- /dev/null +++ b/cpu/mpc86xx/mp.c @@ -0,0 +1,68 @@ +#include <common.h> +#include <asm/processor.h> +#include <asm/mmu.h> +#include <ioports.h> +#include <lmb.h> +#include <asm/io.h> +#include "mp.h" + +DECLARE_GLOBAL_DATA_PTR; + +#if (CONFIG_NUM_CPUS > 1) +void cpu_mp_lmb_reserve(struct lmb *lmb) +{ + u32 bootpg; + + /* if we have 4G or more of memory, put the boot page at 4Gb-1M */ + if ((u64)gd->ram_size > 0xfffff000) + bootpg = 0xfff00000; + else + bootpg = gd->ram_size - (1024 * 1024); + + /* tell u-boot we stole a page */ + lmb_reserve(lmb, bootpg, 4096); +} + +/* + * Copy the code for other cpus to execute into an + * aligned location accessible via BPTR + */ +void setup_mp(void) +{ + extern ulong __secondary_start_page; + ulong fixup = (ulong)&__secondary_start_page; + u32 bootpg; + u32 bootpg_va; + + /* + * If we have 4G or more of memory, put the boot page at 4Gb-1M. + * Otherwise, put it at the very end of RAM. + */ + if (gd->ram_size > 0xfffff000) + bootpg = 0xfff00000; + else + bootpg = gd->ram_size - (1024 * 1024); + + if (bootpg >= CONFIG_SYS_MAX_DDR_BAT_SIZE) { + /* We're not covered by the DDR mapping, set up BAT */ + write_bat(DBAT7, CONFIG_SYS_SCRATCH_VA | BATU_BL_128K | + BATU_VS | BATU_VP, + bootpg | BATL_PP_RW | BATL_MEMCOHERENCE); + bootpg_va = CONFIG_SYS_SCRATCH_VA; + } else { + bootpg_va = bootpg; + } + + memcpy((void *)bootpg_va, (void *)fixup, 4096); + flush_cache(bootpg_va, 4096); + + /* remove the temporary BAT mapping */ + if (bootpg >= CONFIG_SYS_MAX_DDR_BAT_SIZE) + write_bat(DBAT7, 0, 0); + + /* If the physical location of bootpg is not at fff00000, set BPTR */ + if (bootpg != 0xfff00000) + out_be32((uint *)(CONFIG_SYS_CCSRBAR + 0x20), 0x80000000 | + (bootpg >> 12)); +} +#endif diff --git a/cpu/mpc86xx/mp.h b/cpu/mpc86xx/mp.h new file mode 100644 index 0000000..886e0c8 --- /dev/null +++ b/cpu/mpc86xx/mp.h @@ -0,0 +1,7 @@ +#ifndef __MPC86XX_MP_H_ +#define __MPC86XX_MP_H_ + +void setup_mp(void); +void cpu_mp_lmb_reserve(struct lmb *lmb); + +#endif diff --git a/cpu/mpc86xx/release.S b/cpu/mpc86xx/release.S new file mode 100644 index 0000000..95efbb4 --- /dev/null +++ b/cpu/mpc86xx/release.S @@ -0,0 +1,169 @@ +/* + * Copyright 2004, 2007, 2008 Freescale Semiconductor. 
+ * Srikanth Srinivasan <srikanth.srinivaan@freescale.com> + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ +#include <config.h> +#include <mpc86xx.h> +#include <version.h> + +#include <ppc_asm.tmpl> +#include <ppc_defs.h> + +#include <asm/cache.h> +#include <asm/mmu.h> + +/* If this is a multi-cpu system then we need to handle the + * 2nd cpu. The assumption is that the 2nd cpu is being + * held in boot holdoff mode until the 1st cpu unlocks it + * from Linux. We'll do some basic cpu init and then pass + * it to the Linux Reset Vector. + * Sri: Much of this initialization is not required. Linux + * rewrites the bats, and the sprs and also enables the L1 cache. + * + * Core 0 must copy this to a 1M aligned region and set BPTR + * to point to it. + */ +#if (CONFIG_NUM_CPUS > 1) + .align 12 +.globl __secondary_start_page +__secondary_start_page: + .space 0x100 /* space over to reset vector loc */ + mfspr r0, MSSCR0 + andi. r0, r0, 0x0020 + rlwinm r0,r0,27,31,31 + mtspr PIR, r0 + + /* Invalidate BATs */ + li r0, 0 + mtspr IBAT0U, r0 + mtspr IBAT1U, r0 + mtspr IBAT2U, r0 + mtspr IBAT3U, r0 + mtspr IBAT4U, r0 + mtspr IBAT5U, r0 + mtspr IBAT6U, r0 + mtspr IBAT7U, r0 + isync + mtspr DBAT0U, r0 + mtspr DBAT1U, r0 + mtspr DBAT2U, r0 + mtspr DBAT3U, r0 + mtspr DBAT4U, r0 + mtspr DBAT5U, r0 + mtspr DBAT6U, r0 + mtspr DBAT7U, r0 + isync + sync + + /* enable extended addressing */ + mfspr r0, HID0 + lis r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@h + ori r0, r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@l + mtspr HID0, r0 + sync + isync + +#ifdef CONFIG_SYS_L2 + /* init the L2 cache */ + addis r3, r0, L2_INIT@h + ori r3, r3, L2_INIT@l + sync + mtspr l2cr, r3 +#ifdef CONFIG_ALTIVEC + dssall +#endif + /* invalidate the L2 cache */ + mfspr r3, l2cr + rlwinm. r3, r3, 0, 0, 0 + beq 1f + + mfspr r3, l2cr + rlwinm r3, r3, 0, 1, 31 + +#ifdef CONFIG_ALTIVEC + dssall +#endif + sync + mtspr l2cr, r3 + sync +1: mfspr r3, l2cr + oris r3, r3, L2CR_L2I@h + mtspr l2cr, r3 + +invl2: + mfspr r3, l2cr + andis. 
r3, r3, L2CR_L2I@h + bne invl2 + sync +#endif + + /* enable and invalidate the data cache */ + mfspr r3, HID0 + li r5, HID0_DCFI|HID0_DLOCK + andc r3, r3, r5 + mtspr HID0, r3 /* no invalidate, unlock */ + ori r3, r3, HID0_DCE + ori r5, r3, HID0_DCFI + mtspr HID0, r5 /* enable + invalidate */ + mtspr HID0, r3 /* enable */ + sync +#ifdef CONFIG_SYS_L2 + sync + lis r3, L2_ENABLE@h + ori r3, r3, L2_ENABLE@l + mtspr l2cr, r3 + isync + sync +#endif + + /* enable and invalidate the instruction cache*/ + mfspr r3, HID0 + li r5, HID0_ICFI|HID0_ILOCK + andc r3, r3, r5 + ori r3, r3, HID0_ICE + ori r5, r3, HID0_ICFI + mtspr HID0, r5 + mtspr HID0, r3 + isync + sync + + /* TBEN in HID0 */ + mfspr r4, HID0 + oris r4, r4, 0x0400 + mtspr HID0, r4 + sync + isync + + /* MCP|SYNCBE|ABE in HID1 */ + mfspr r4, HID1 + oris r4, r4, 0x8000 + ori r4, r4, 0x0C00 + mtspr HID1, r4 + sync + isync + + lis r3, CONFIG_LINUX_RESET_VEC@h + ori r3, r3, CONFIG_LINUX_RESET_VEC@l + mtlr r3 + blr + + /* Never Returns, Running in Linux Now */ +#endif diff --git a/cpu/mpc86xx/speed.c b/cpu/mpc86xx/speed.c index da5b58b..415ac9d 100644 --- a/cpu/mpc86xx/speed.c +++ b/cpu/mpc86xx/speed.c @@ -36,7 +36,7 @@ extern unsigned long get_board_sys_clk(unsigned long dummy); void get_sys_info(sys_info_t *sysInfo) { - volatile immap_t *immap = (immap_t *) CFG_IMMR; + volatile immap_t *immap = (immap_t *) CONFIG_SYS_IMMR; volatile ccsr_gur_t *gur = &immap->im_gur; uint plat_ratio, e600_ratio; diff --git a/cpu/mpc86xx/start.S b/cpu/mpc86xx/start.S index 03f2128..63cc8db 100644 --- a/cpu/mpc86xx/start.S +++ b/cpu/mpc86xx/start.S @@ -32,6 +32,7 @@ */ #include <config.h> #include <mpc86xx.h> +#include <timestamp.h> #include <version.h> #include <ppc_asm.tmpl> @@ -76,7 +77,7 @@ .globl version_string version_string: .ascii U_BOOT_VERSION - .ascii " (", __DATE__, " - ", __TIME__, ")" + .ascii " (", U_BOOT_DATE, " - ", U_BOOT_TIME, ")" .ascii CONFIG_IDENT_STRING, "\0" . = EXC_OFF_SYS_RESET @@ -179,22 +180,12 @@ _end_of_vectors: boot_cold: boot_warm: - - /* if this is a multi-core system we need to check which cpu - * this is, if it is not cpu 0 send the cpu to the linux reset - * vector */ -#if (CONFIG_NUM_CPUS > 1) - mfspr r0, MSSCR0 - andi. r0, r0, 0x0020 - rlwinm r0,r0,27,31,31 - mtspr PIR, r0 - beq 1f - - bl secondary_cpu_setup -#endif - + /* + * NOTE: Only Cpu 0 will ever come here. 
Other cores go to an + * address specified by the BPTR + */ 1: -#ifdef CFG_RAMBOOT +#ifdef CONFIG_SYS_RAMBOOT /* disable everything */ li r0, 0 mtspr HID0, r0 @@ -202,10 +193,14 @@ boot_warm: mtmsr 0 #endif + /* Invalidate BATs */ bl invalidate_bats sync + /* Invalidate all of TLB before MMU turn on */ + bl clear_tlbs + sync -#ifdef CFG_L2 +#ifdef CONFIG_SYS_L2 /* init the L2 cache */ lis r3, L2_INIT@h ori r3, r3, L2_INIT@l @@ -218,8 +213,8 @@ boot_warm: /* * Calculate absolute address in FLASH and jump there *------------------------------------------------------*/ - lis r3, CFG_MONITOR_BASE@h - ori r3, r3, CFG_MONITOR_BASE@l + lis r3, CONFIG_SYS_MONITOR_BASE_EARLY@h + ori r3, r3, CONFIG_SYS_MONITOR_BASE_EARLY@l addi r3, r3, in_flash - _start + EXC_OFF_SYS_RESET mtlr r3 blr @@ -245,9 +240,15 @@ in_flash: */ /* enable address translation */ - bl enable_addr_trans - sync + mfmsr r5 + ori r5, r5, (MSR_IR | MSR_DR) + lis r3,addr_trans_enabled@h + ori r3, r3, addr_trans_enabled@l + mtspr SPRN_SRR0,r3 + mtspr SPRN_SRR1,r5 + rfi +addr_trans_enabled: /* enable and invalidate the data cache */ /* bl l1dcache_enable */ bl dcache_enable @@ -257,15 +258,19 @@ in_flash: bl icache_enable #endif -#ifdef CFG_INIT_RAM_LOCK +#ifdef CONFIG_SYS_INIT_RAM_LOCK bl lock_ram_in_cache sync #endif +#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR) + bl setup_ccsrbar +#endif + /* set up the stack pointer in our newly created * cache-ram (r1) */ - lis r1, (CFG_INIT_RAM_ADDR + CFG_GBL_DATA_OFFSET)@h - ori r1, r1, (CFG_INIT_RAM_ADDR + CFG_GBL_DATA_OFFSET)@l + lis r1, (CONFIG_SYS_INIT_RAM_ADDR + CONFIG_SYS_GBL_DATA_OFFSET)@h + ori r1, r1, (CONFIG_SYS_INIT_RAM_ADDR + CONFIG_SYS_GBL_DATA_OFFSET)@l li r0, 0 /* Make room for stack frame header and */ stwu r0, -4(r1) /* clear final stack frame so that */ @@ -273,16 +278,6 @@ in_flash: GET_GOT /* initialize GOT access */ - /* setup the rest of the bats */ - bl setup_bats - bl clear_tlbs - sync - -#if (CFG_CCSRBAR_DEFAULT != CFG_CCSRBAR) - /* setup ccsrbar */ - bl setup_ccsrbar -#endif - /* run low-level CPU init code (from Flash) */ bl cpu_init_f sync @@ -290,7 +285,7 @@ in_flash: #ifdef RUN_DIAG /* Load PX_AUX register address in r4 */ - lis r4, 0xf810 + lis r4, PIXIS_BASE@h ori r4, r4, 0x6 /* Load contents of PX_AUX in r3 bits 24 to 31*/ lbz r3, 0(r4) @@ -308,8 +303,8 @@ in_flash: stb r3, 0(r4) /* Get the address to jump to in r3*/ - lis r3, CFG_DIAG_ADDR@h - ori r3, r3, CFG_DIAG_ADDR@l + lis r3, CONFIG_SYS_DIAG_ADDR@h + ori r3, r3, CONFIG_SYS_DIAG_ADDR@l /* Load the LR with the branch address */ mtlr r3 @@ -362,45 +357,83 @@ invalidate_bats: * early_bats: * * Set up bats needed early on - this is usually the BAT for the - * stack-in-cache and the Flash + * stack-in-cache, the Flash, and CCSR space */ .globl early_bats early_bats: + /* IBAT 3 */ + lis r4, CONFIG_SYS_IBAT3L@h + ori r4, r4, CONFIG_SYS_IBAT3L@l + lis r3, CONFIG_SYS_IBAT3U@h + ori r3, r3, CONFIG_SYS_IBAT3U@l + mtspr IBAT3L, r4 + mtspr IBAT3U, r3 + isync + + /* DBAT 3 */ + lis r4, CONFIG_SYS_DBAT3L@h + ori r4, r4, CONFIG_SYS_DBAT3L@l + lis r3, CONFIG_SYS_DBAT3U@h + ori r3, r3, CONFIG_SYS_DBAT3U@l + mtspr DBAT3L, r4 + mtspr DBAT3U, r3 + isync + /* IBAT 5 */ - lis r4, CFG_IBAT5L@h - ori r4, r4, CFG_IBAT5L@l - lis r3, CFG_IBAT5U@h - ori r3, r3, CFG_IBAT5U@l + lis r4, CONFIG_SYS_IBAT5L@h + ori r4, r4, CONFIG_SYS_IBAT5L@l + lis r3, CONFIG_SYS_IBAT5U@h + ori r3, r3, CONFIG_SYS_IBAT5U@l mtspr IBAT5L, r4 mtspr IBAT5U, r3 isync /* DBAT 5 */ - lis r4, CFG_DBAT5L@h - ori r4, r4, CFG_DBAT5L@l - lis r3, CFG_DBAT5U@h - 
ori r3, r3, CFG_DBAT5U@l + lis r4, CONFIG_SYS_DBAT5L@h + ori r4, r4, CONFIG_SYS_DBAT5L@l + lis r3, CONFIG_SYS_DBAT5U@h + ori r3, r3, CONFIG_SYS_DBAT5U@l mtspr DBAT5L, r4 mtspr DBAT5U, r3 isync /* IBAT 6 */ - lis r4, CFG_IBAT6L@h - ori r4, r4, CFG_IBAT6L@l - lis r3, CFG_IBAT6U@h - ori r3, r3, CFG_IBAT6U@l + lis r4, CONFIG_SYS_IBAT6L_EARLY@h + ori r4, r4, CONFIG_SYS_IBAT6L_EARLY@l + lis r3, CONFIG_SYS_IBAT6U_EARLY@h + ori r3, r3, CONFIG_SYS_IBAT6U_EARLY@l mtspr IBAT6L, r4 mtspr IBAT6U, r3 isync /* DBAT 6 */ - lis r4, CFG_DBAT6L@h - ori r4, r4, CFG_DBAT6L@l - lis r3, CFG_DBAT6U@h - ori r3, r3, CFG_DBAT6U@l + lis r4, CONFIG_SYS_DBAT6L_EARLY@h + ori r4, r4, CONFIG_SYS_DBAT6L_EARLY@l + lis r3, CONFIG_SYS_DBAT6U_EARLY@h + ori r3, r3, CONFIG_SYS_DBAT6U_EARLY@l mtspr DBAT6L, r4 mtspr DBAT6U, r3 isync + +#if(CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR) + /* IBAT 7 */ + lis r4, CONFIG_SYS_CCSR_DEFAULT_IBATL@h + ori r4, r4, CONFIG_SYS_CCSR_DEFAULT_IBATL@l + lis r3, CONFIG_SYS_CCSR_DEFAULT_IBATU@h + ori r3, r3, CONFIG_SYS_CCSR_DEFAULT_IBATU@l + mtspr IBAT7L, r4 + mtspr IBAT7U, r3 + isync + + /* DBAT 7 */ + lis r4, CONFIG_SYS_CCSR_DEFAULT_DBATL@h + ori r4, r4, CONFIG_SYS_CCSR_DEFAULT_DBATL@l + lis r3, CONFIG_SYS_CCSR_DEFAULT_DBATU@h + ori r3, r3, CONFIG_SYS_CCSR_DEFAULT_DBATU@l + mtspr DBAT7L, r4 + mtspr DBAT7U, r3 + isync +#endif blr .globl clear_tlbs @@ -416,15 +449,6 @@ tlblp: blt tlblp blr - .globl enable_addr_trans -enable_addr_trans: - /* enable address translation */ - mfmsr r5 - ori r5, r5, (MSR_IR | MSR_DR) - mtmsr r5 - isync - blr - .globl disable_addr_trans disable_addr_trans: /* disable address translation */ @@ -617,20 +641,19 @@ relocate_code: mr r1, r3 /* Set new stack pointer */ mr r9, r4 /* Save copy of Global Data pointer */ - mr r2, r9 /* Save for DECLARE_GLOBAL_DATA_PTR */ mr r10, r5 /* Save copy of Destination Address */ mr r3, r5 /* Destination Address */ - lis r4, CFG_MONITOR_BASE@h /* Source Address */ - ori r4, r4, CFG_MONITOR_BASE@l + lis r4, CONFIG_SYS_MONITOR_BASE@h /* Source Address */ + ori r4, r4, CONFIG_SYS_MONITOR_BASE@l lwz r5, GOT(__init_end) sub r5, r5, r4 - li r6, CFG_CACHELINE_SIZE /* Cache Line Size */ + li r6, CONFIG_SYS_CACHELINE_SIZE /* Cache Line Size */ /* * Fix GOT pointer: * - * New GOT-PTR = (old GOT-PTR - CFG_MONITOR_BASE) + Destination Address + * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address * * Offset: */ @@ -644,16 +667,6 @@ relocate_code: /* * Now relocate code */ -#ifdef CONFIG_ECC - bl board_relocate_rom - sync - mr r3, r10 /* Destination Address */ - lis r4, CFG_MONITOR_BASE@h /* Source Address */ - ori r4, r4, CFG_MONITOR_BASE@l - lwz r5, GOT(__init_end) - sub r5, r5, r4 - li r6, CFG_CACHELINE_SIZE /* Cache Line Size */ -#else cmplw cr1,r3,r4 addi r0,r5,3 srwi. r0,r0,2 @@ -675,7 +688,6 @@ relocate_code: 3: lwzu r0,-4(r8) stwu r0,-4(r7) bdnz 3b -#endif /* * Now flush the cache: note that we must start from a cache aligned * address. Otherwise we might miss one cache line. 
@@ -708,9 +720,6 @@ relocate_code: blr in_ram: -#ifdef CONFIG_ECC - bl board_init_ecc -#endif /* * Relocation Function, r14 point to got2+0x8000 * @@ -864,40 +873,43 @@ enable_ext_addr: isync blr -#if (CFG_CCSRBAR_DEFAULT != CFG_CCSRBAR) +#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR) .globl setup_ccsrbar setup_ccsrbar: /* Special sequence needed to update CCSRBAR itself */ - lis r4, CFG_CCSRBAR_DEFAULT@h - ori r4, r4, CFG_CCSRBAR_DEFAULT@l - - lis r5, CFG_CCSRBAR@h - ori r5, r5, CFG_CCSRBAR@l - srwi r6,r5,12 - stw r6, 0(r4) + lis r4, CONFIG_SYS_CCSRBAR_DEFAULT@h + ori r4, r4, CONFIG_SYS_CCSRBAR_DEFAULT@l + + lis r5, CONFIG_SYS_CCSRBAR_PHYS_LOW@h + ori r5, r5, CONFIG_SYS_CCSRBAR_PHYS_LOW@l + srwi r5,r5,12 + li r6, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l + rlwimi r5,r6,20,8,11 + stw r5, 0(r4) /* Store physical value of CCSR */ isync - lis r5, 0xffff - ori r5,r5,0xf000 + lis r5, TEXT_BASE@h + ori r5,r5,TEXT_BASE@l lwz r5, 0(r5) isync - lis r3, CFG_CCSRBAR@h - lwz r5, CFG_CCSRBAR@l(r3) + /* Use VA of CCSR to do read */ + lis r3, CONFIG_SYS_CCSRBAR@h + lwz r5, CONFIG_SYS_CCSRBAR@l(r3) isync blr #endif -#ifdef CFG_INIT_RAM_LOCK +#ifdef CONFIG_SYS_INIT_RAM_LOCK lock_ram_in_cache: /* Allocate Initial RAM in data cache. */ - lis r3, (CFG_INIT_RAM_ADDR & ~31)@h - ori r3, r3, (CFG_INIT_RAM_ADDR & ~31)@l - li r2, ((CFG_INIT_RAM_END & ~31) + \ - (CFG_INIT_RAM_ADDR & 31) + 31) / 32 - mtctr r2 + lis r3, (CONFIG_SYS_INIT_RAM_ADDR & ~31)@h + ori r3, r3, (CONFIG_SYS_INIT_RAM_ADDR & ~31)@l + li r4, ((CONFIG_SYS_INIT_RAM_END & ~31) + \ + (CONFIG_SYS_INIT_RAM_ADDR & 31) + 31) / 32 + mtctr r4 1: dcbz r0, r3 addi r3, r3, 32 @@ -928,11 +940,11 @@ lock_ram_in_cache: .globl unlock_ram_in_cache unlock_ram_in_cache: /* invalidate the INIT_RAM section */ - lis r3, (CFG_INIT_RAM_ADDR & ~31)@h - ori r3, r3, (CFG_INIT_RAM_ADDR & ~31)@l - li r2, ((CFG_INIT_RAM_END & ~31) + \ - (CFG_INIT_RAM_ADDR & 31) + 31) / 32 - mtctr r2 + lis r3, (CONFIG_SYS_INIT_RAM_ADDR & ~31)@h + ori r3, r3, (CONFIG_SYS_INIT_RAM_ADDR & ~31)@l + li r4, ((CONFIG_SYS_INIT_RAM_END & ~31) + \ + (CONFIG_SYS_INIT_RAM_ADDR & 31) + 31) / 32 + mtctr r4 1: icbi r0, r3 addi r3, r3, 32 bdnz 1b @@ -970,64 +982,3 @@ unlock_ram_in_cache: blr #endif #endif - -/* If this is a multi-cpu system then we need to handle the - * 2nd cpu. The assumption is that the 2nd cpu is being - * held in boot holdoff mode until the 1st cpu unlocks it - * from Linux. We'll do some basic cpu init and then pass - * it to the Linux Reset Vector. - * Sri: Much of this initialization is not required. Linux - * rewrites the bats, and the sprs and also enables the L1 cache. - */ -#if (CONFIG_NUM_CPUS > 1) -.globl secondary_cpu_setup -secondary_cpu_setup: - /* Do only core setup on all cores except cpu0 */ - bl invalidate_bats - sync - bl enable_ext_addr - -#ifdef CFG_L2 - /* init the L2 cache */ - addis r3, r0, L2_INIT@h - ori r3, r3, L2_INIT@l - sync - mtspr l2cr, r3 -#ifdef CONFIG_ALTIVEC - dssall -#endif - /* invalidate the L2 cache */ - bl l2cache_invalidate - sync -#endif - - /* enable and invalidate the data cache */ - bl dcache_enable - sync - - /* enable and invalidate the instruction cache*/ - bl icache_enable - sync - - /* TBEN in HID0 */ - mfspr r4, HID0 - oris r4, r4, 0x0400 - mtspr HID0, r4 - sync - isync - - /* MCP|SYNCBE|ABE in HID1 */ - mfspr r4, HID1 - oris r4, r4, 0x8000 - ori r4, r4, 0x0C00 - mtspr HID1, r4 - sync - isync - - lis r3, CONFIG_LINUX_RESET_VEC@h - ori r3, r3, CONFIG_LINUX_RESET_VEC@l - mtlr r3 - blr - - /* Never Returns, Running in Linux Now */ -#endif |
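
One further detail worth calling out from the reworked setup_ccsrbar in start.S above: CCSRBAR is now programmed from the split CONFIG_SYS_CCSRBAR_PHYS_LOW/HIGH pair instead of a single 32-bit CFG_CCSRBAR, with the high address bits merged in via rlwimi — presumably to cover 36-bit physical addressing, which release.S also enables for the second core via HID0_XAEN. A possible C rendering of that encoding follows; the ccsrbar_value() helper name and the example address are illustrative and not taken from the diff.

#include <stdint.h>
#include <stdio.h>

/*
 * One reading of the rlwimi-based sequence in setup_ccsrbar:
 * the register word is (PHYS_LOW >> 12) with the upper four
 * address bits (PHYS_HIGH) inserted at bits 20..23, i.e. the
 * 36-bit CCSR physical base shifted right by 12.
 */
static uint32_t ccsrbar_value(uint32_t phys_low, uint32_t phys_high)
{
        return (phys_low >> 12) | ((phys_high & 0xFU) << 20);
}

int main(void)
{
        /* e.g. a CCSR base at 36-bit physical address 0xF_E000_0000 */
        printf("CCSRBAR word: 0x%08x\n",
               (unsigned)ccsrbar_value(0xe0000000U, 0xfU));
        return 0;
}
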