Diffstat (limited to 'cpu/mpc86xx')
-rw-r--r--  cpu/mpc86xx/Makefile    |    4
-rw-r--r--  cpu/mpc86xx/cpu_init.c  |    8
-rw-r--r--  cpu/mpc86xx/fdt.c       |   21
-rw-r--r--  cpu/mpc86xx/mp.c        |   68
-rw-r--r--  cpu/mpc86xx/mp.h        |    7
-rw-r--r--  cpu/mpc86xx/release.S   |  169
-rw-r--r--  cpu/mpc86xx/start.S     |  186
7 files changed, 352 insertions, 111 deletions
diff --git a/cpu/mpc86xx/Makefile b/cpu/mpc86xx/Makefile
index a9767ad..34a9755 100644
--- a/cpu/mpc86xx/Makefile
+++ b/cpu/mpc86xx/Makefile
@@ -31,6 +31,10 @@ LIB = $(obj)lib$(CPU).a
START = start.o
SOBJS = cache.o
+ifneq ($(CONFIG_NUM_CPUS),1)
+COBJS-y += mp.o
+SOBJS += release.o
+endif
COBJS-y += traps.o
COBJS-y += cpu.o
COBJS-y += cpu_init.o
diff --git a/cpu/mpc86xx/cpu_init.c b/cpu/mpc86xx/cpu_init.c
index 4ab88f0..a7e6036 100644
--- a/cpu/mpc86xx/cpu_init.c
+++ b/cpu/mpc86xx/cpu_init.c
@@ -31,6 +31,9 @@
#include <mpc86xx.h>
#include <asm/mmu.h>
#include <asm/fsl_law.h>
+#include "mp.h"
+
+void setup_bats(void);
DECLARE_GLOBAL_DATA_PTR;
@@ -56,6 +59,8 @@ void cpu_init_f(void)
init_laws();
#endif
+ setup_bats();
+
/* Map banks 0 and 1 to the FLASH banks 0 and 1 at preliminary
* addresses - these have to be modified later when FLASH size
* has been determined
@@ -121,6 +126,9 @@ void cpu_init_f(void)
*/
int cpu_init_r(void)
{
+#if (CONFIG_NUM_CPUS > 1)
+ setup_mp();
+#endif
return 0;
}
diff --git a/cpu/mpc86xx/fdt.c b/cpu/mpc86xx/fdt.c
index 1fef94f5..3adfad9 100644
--- a/cpu/mpc86xx/fdt.c
+++ b/cpu/mpc86xx/fdt.c
@@ -9,9 +9,17 @@
#include <common.h>
#include <libfdt.h>
#include <fdt_support.h>
+#include "mp.h"
+
+DECLARE_GLOBAL_DATA_PTR;
void ft_cpu_setup(void *blob, bd_t *bd)
{
+#if (CONFIG_NUM_CPUS > 1)
+ int off;
+ u32 bootpg;
+#endif
+
do_fixup_by_prop_u32(blob, "device_type", "cpu", 4,
"timebase-frequency", bd->bi_busfreq / 4, 1);
do_fixup_by_prop_u32(blob, "device_type", "cpu", 4,
@@ -32,4 +40,17 @@ void ft_cpu_setup(void *blob, bd_t *bd)
do_fixup_by_compat_u32(blob, "ns16550",
"clock-frequency", CONFIG_SYS_NS16550_CLK, 1);
#endif
+
+#if (CONFIG_NUM_CPUS > 1)
+ /* If we have 4GB or more of memory, put the boot page at 4GB - 1MB */
+ if (gd->ram_size > 0xfffff000)
+ bootpg = 0xfff00000;
+ else
+ bootpg = gd->ram_size - (1024 * 1024);
+
+ /* Reserve the boot page so OSes don't use it */
+ off = fdt_add_mem_rsv(blob, bootpg, (u64)4096);
+ if (off < 0)
+ printf("%s: %s\n", __FUNCTION__, fdt_strerror(off));
+#endif
}
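
The boot-page placement and reservation logic above, pulled out as a self-contained sketch for reference. fdt_add_mem_rsv() is the stock libfdt call the patch uses; the helper name reserve_bootpg() is invented for illustration:

	#include <libfdt.h>

	static int reserve_bootpg(void *blob, unsigned long long ram_size)
	{
		unsigned long bootpg;

		/* Cap the boot page below 4GB; otherwise use the last MB of RAM */
		if (ram_size > 0xfffff000ULL)
			bootpg = 0xfff00000UL;
		else
			bootpg = ram_size - (1024 * 1024);

		/* Add a /memreserve/ entry so the OS leaves the page alone */
		return fdt_add_mem_rsv(blob, bootpg, 4096);
	}
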
diff --git a/cpu/mpc86xx/mp.c b/cpu/mpc86xx/mp.c
new file mode 100644
index 0000000..5014401
--- /dev/null
+++ b/cpu/mpc86xx/mp.c
@@ -0,0 +1,68 @@
+#include <common.h>
+#include <asm/processor.h>
+#include <asm/mmu.h>
+#include <ioports.h>
+#include <lmb.h>
+#include <asm/io.h>
+#include "mp.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#if (CONFIG_NUM_CPUS > 1)
+void cpu_mp_lmb_reserve(struct lmb *lmb)
+{
+ u32 bootpg;
+
+ /* If we have 4GB or more of memory, put the boot page at 4GB - 1MB */
+ if ((u64)gd->ram_size > 0xfffff000)
+ bootpg = 0xfff00000;
+ else
+ bootpg = gd->ram_size - (1024 * 1024);
+
+ /* tell U-Boot we stole a page */
+ lmb_reserve(lmb, bootpg, 4096);
+}
+
+/*
+ * Copy the code for other cpus to execute into an
+ * aligned location accessible via BPTR
+ */
+void setup_mp(void)
+{
+ extern ulong __secondary_start_page;
+ ulong fixup = (ulong)&__secondary_start_page;
+ u32 bootpg;
+ u32 bootpg_va;
+
+ /*
+ * If we have 4GB or more of memory, put the boot page at 4GB - 1MB.
+ * Otherwise, put it at the very end of RAM.
+ */
+ if (gd->ram_size > 0xfffff000)
+ bootpg = 0xfff00000;
+ else
+ bootpg = gd->ram_size - (1024 * 1024);
+
+ if (bootpg >= CONFIG_SYS_MAX_DDR_BAT_SIZE) {
+ /* We're not covered by the DDR mapping, set up BAT */
+ write_bat(DBAT7, CONFIG_SYS_SCRATCH_VA | BATU_BL_128K |
+ BATU_VS | BATU_VP,
+ bootpg | BATL_PP_RW | BATL_MEMCOHERENCE);
+ bootpg_va = CONFIG_SYS_SCRATCH_VA;
+ } else {
+ bootpg_va = bootpg;
+ }
+
+ memcpy((void *)bootpg_va, (void *)fixup, 4096);
+ flush_cache(bootpg_va, 4096);
+
+ /* remove the temporary BAT mapping */
+ if (bootpg >= CONFIG_SYS_MAX_DDR_BAT_SIZE)
+ write_bat(DBAT7, 0, 0);
+
+ /* If the physical location of bootpg is not 0xfff00000, set BPTR */
+ if (bootpg != 0xfff00000)
+ out_be32((uint *)(CONFIG_SYS_CCSRBAR + 0x20), 0x80000000 |
+ (bootpg >> 12));
+}
+#endif
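
The out_be32() at the end of setup_mp() programs BPTR, the boot page translation register at CCSR offset 0x20. A sketch of the encoding as I read the code; the BPTR_EN name mirrors the reference manual's EN bit and is not something the patch defines:

	#define BPTR_EN		0x80000000u	/* enable boot-page translation */

	static unsigned int bptr_value(unsigned int bootpg)
	{
		/* low bits carry the 4KB physical page number of the boot page */
		return BPTR_EN | (bootpg >> 12);
	}

With bootpg = 0x7ff00000 (the last MB of 2GB RAM) this yields 0x8007ff00; while EN is set, boot fetches to 0xfffffxxx are redirected into that page.
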
diff --git a/cpu/mpc86xx/mp.h b/cpu/mpc86xx/mp.h
new file mode 100644
index 0000000..886e0c8
--- /dev/null
+++ b/cpu/mpc86xx/mp.h
@@ -0,0 +1,7 @@
+#ifndef __MPC86XX_MP_H_
+#define __MPC86XX_MP_H_
+
+void setup_mp(void);
+void cpu_mp_lmb_reserve(struct lmb *lmb);
+
+#endif
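
A hedged usage sketch for the new header: cpu_mp_lmb_reserve() is meant to run during the bootm LMB setup, before load addresses are chosen, so images stay off the stolen page. The call site and the ram_base/ram_size parameters are assumptions here; lmb_init()/lmb_add() are the standard lib_generic lmb API:

	#include <lmb.h>
	#include "mp.h"

	void example_reserve(unsigned long ram_base, unsigned long ram_size)
	{
		struct lmb lmb;

		lmb_init(&lmb);
		lmb_add(&lmb, ram_base, ram_size);
		cpu_mp_lmb_reserve(&lmb);	/* keeps images off the boot page */
	}
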
diff --git a/cpu/mpc86xx/release.S b/cpu/mpc86xx/release.S
new file mode 100644
index 0000000..b524e50
--- /dev/null
+++ b/cpu/mpc86xx/release.S
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2004, 2007, 2008 Freescale Semiconductor.
+ * Srikanth Srinivasan <srikanth.srinivaan@freescale.com>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#include <config.h>
+#include <mpc86xx.h>
+#include <version.h>
+
+#include <ppc_asm.tmpl>
+#include <ppc_defs.h>
+
+#include <asm/cache.h>
+#include <asm/mmu.h>
+
+/* If this is a multi-cpu system then we need to handle the
+ * 2nd cpu. The assumption is that the 2nd cpu is being
+ * held in boot holdoff mode until the 1st cpu unlocks it
+ * from Linux. We'll do some basic cpu init and then pass
+ * it to the Linux Reset Vector.
+ * Sri: Much of this initialization is not required. Linux
+ * rewrites the BATs and SPRs and also enables the L1 cache.
+ *
+ * Core 0 must copy this to a 1M aligned region and set BPTR
+ * to point to it.
+ */
+#if (CONFIG_NUM_CPUS > 1)
+ .align 12
+.globl __secondary_start_page
+__secondary_start_page:
+ .space 0x100 /* space over to reset vector loc */
+ mfspr r0, MSSCR0
+ andi. r0, r0, 0x0020
+ rlwinm r0, r0, 27, 31, 31
+ mtspr PIR, r0
+
+ /* Invalidate BATs */
+ li r0, 0
+ mtspr IBAT0U, r0
+ mtspr IBAT1U, r0
+ mtspr IBAT2U, r0
+ mtspr IBAT3U, r0
+ mtspr IBAT4U, r0
+ mtspr IBAT5U, r0
+ mtspr IBAT6U, r0
+ mtspr IBAT7U, r0
+ isync
+ mtspr DBAT0U, r0
+ mtspr DBAT1U, r0
+ mtspr DBAT2U, r0
+ mtspr DBAT3U, r0
+ mtspr DBAT4U, r0
+ mtspr DBAT5U, r0
+ mtspr DBAT6U, r0
+ mtspr DBAT7U, r0
+ isync
+ sync
+
+ /* enable the high BATs and extended addressing */
+ mfspr r0, HID0
+ lis r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@h
+ ori r0, r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@l
+ mtspr HID0, r0
+ sync
+ isync
+
+#ifdef CONFIG_SYS_L2
+ /* init the L2 cache */
+ addis r3, r0, L2_INIT@h
+ ori r3, r3, L2_INIT@l
+ sync
+ mtspr l2cr, r3
+#ifdef CONFIG_ALTIVEC
+ dssall
+#endif
+ /* invalidate the L2 cache */
+ mfspr r3, l2cr
+ rlwinm. r3, r3, 0, 0, 0
+ beq 1f
+
+ mfspr r3, l2cr
+ rlwinm r3, r3, 0, 1, 31
+
+#ifdef CONFIG_ALTIVEC
+ dssall
+#endif
+ sync
+ mtspr l2cr, r3
+ sync
+1: mfspr r3, l2cr
+ oris r3, r3, L2CR_L2I@h
+ mtspr l2cr, r3
+
+invl2:
+ mfspr r3, l2cr
+ andis. r3, r3, L2CR_L2I@h
+ bne invl2
+ sync
+#endif
+
+ /* enable and invalidate the data cache */
+ mfspr r3, HID0
+ li r5, HID0_DCFI|HID0_DLOCK
+ andc r3, r3, r5
+ mtspr HID0, r3 /* no invalidate, unlock */
+ ori r3, r3, HID0_DCE
+ ori r5, r3, HID0_DCFI
+ mtspr HID0, r5 /* enable + invalidate */
+ mtspr HID0, r3 /* enable */
+ sync
+#ifdef CONFIG_SYS_L2
+ sync
+ lis r3, L2_ENABLE@h
+ ori r3, r3, L2_ENABLE@l
+ mtspr l2cr, r3
+ isync
+ sync
+#endif
+
+ /* enable and invalidate the instruction cache */
+ mfspr r3, HID0
+ li r5, HID0_ICFI|HID0_ILOCK
+ andc r3, r3, r5
+ ori r3, r3, HID0_ICE
+ ori r5, r3, HID0_ICFI
+ mtspr HID0, r5
+ mtspr HID0, r3
+ isync
+ sync
+
+ /* TBEN in HID0 */
+ mfspr r4, HID0
+ oris r4, r4, 0x0400
+ mtspr HID0, r4
+ sync
+ isync
+
+ /* MCP|SYNCBE|ABE in HID1 */
+ mfspr r4, HID1
+ oris r4, r4, 0x8000
+ ori r4, r4, 0x0C00
+ mtspr HID1, r4
+ sync
+ isync
+
+ lis r3, CONFIG_LINUX_RESET_VEC@h
+ ori r3, r3, CONFIG_LINUX_RESET_VEC@l
+ mtlr r3
+ blr
+
+ /* Never Returns, Running in Linux Now */
+#endif
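
The first instructions of __secondary_start_page derive the core's ID from MSSCR0 and seed PIR with it, so each of the two e600 cores ends up with processor ID 0 or 1. A C rendering of that bit manipulation (a sketch; 0x0020 is the MSSCR0[ID] bit the asm tests):

	static inline unsigned int core_id_from_msscr0(unsigned int msscr0)
	{
		/* andi. 0x0020 isolates MSSCR0[ID]; rlwinm 27,31,31 moves
		 * it to the LSB -- i.e. a shift right by 5 */
		return (msscr0 >> 5) & 1;
	}
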
diff --git a/cpu/mpc86xx/start.S b/cpu/mpc86xx/start.S
index 75e4317..efd654c 100644
--- a/cpu/mpc86xx/start.S
+++ b/cpu/mpc86xx/start.S
@@ -179,20 +179,10 @@ _end_of_vectors:
boot_cold:
boot_warm:
-
- /* if this is a multi-core system we need to check which cpu
- * this is, if it is not cpu 0 send the cpu to the linux reset
- * vector */
-#if (CONFIG_NUM_CPUS > 1)
- mfspr r0, MSSCR0
- andi. r0, r0, 0x0020
- rlwinm r0,r0,27,31,31
- mtspr PIR, r0
- beq 1f
-
- bl secondary_cpu_setup
-#endif
-
+ /*
+ * NOTE: Only CPU 0 will ever come here. The other cores go to the
+ * address specified by BPTR.
+ */
1:
#ifdef CONFIG_SYS_RAMBOOT
/* disable everything */
@@ -222,8 +212,8 @@ boot_warm:
/*
* Calculate absolute address in FLASH and jump there
*------------------------------------------------------*/
- lis r3, CONFIG_SYS_MONITOR_BASE@h
- ori r3, r3, CONFIG_SYS_MONITOR_BASE@l
+ lis r3, CONFIG_SYS_MONITOR_BASE_EARLY@h
+ ori r3, r3, CONFIG_SYS_MONITOR_BASE_EARLY@l
addi r3, r3, in_flash - _start + EXC_OFF_SYS_RESET
mtlr r3
blr
@@ -249,9 +239,15 @@ in_flash:
*/
/* enable address translation */
- bl enable_addr_trans
- sync
+ mfmsr r5
+ ori r5, r5, (MSR_IR | MSR_DR)
+ lis r3,addr_trans_enabled@h
+ ori r3, r3, addr_trans_enabled@l
+ mtspr SPRN_SRR0,r3
+ mtspr SPRN_SRR1,r5
+ rfi
+addr_trans_enabled:
/* enable and invalidate the data cache */
/* bl l1dcache_enable */
bl dcache_enable
@@ -266,6 +262,10 @@ in_flash:
sync
#endif
+#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR)
+ bl setup_ccsrbar
+#endif
+
/* set up the stack pointer in our newly created
* cache-ram (r1) */
lis r1, (CONFIG_SYS_INIT_RAM_ADDR + CONFIG_SYS_GBL_DATA_OFFSET)@h
@@ -277,15 +277,6 @@ in_flash:
GET_GOT /* initialize GOT access */
- /* setup the rest of the bats */
- bl setup_bats
- sync
-
-#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR)
- /* setup ccsrbar */
- bl setup_ccsrbar
-#endif
-
/* run low-level CPU init code (from Flash) */
bl cpu_init_f
sync
@@ -293,7 +284,7 @@ in_flash:
#ifdef RUN_DIAG
/* Load PX_AUX register address in r4 */
- lis r4, 0xf810
+ lis r4, PIXIS_BASE@h
ori r4, r4, 0x6
/* Load contents of PX_AUX in r3 bits 24 to 31*/
lbz r3, 0(r4)
@@ -365,10 +356,28 @@ invalidate_bats:
* early_bats:
*
* Set up bats needed early on - this is usually the BAT for the
- * stack-in-cache and the Flash
+ * stack-in-cache, the Flash, and CCSR space
*/
.globl early_bats
early_bats:
+ /* IBAT 3 */
+ lis r4, CONFIG_SYS_IBAT3L@h
+ ori r4, r4, CONFIG_SYS_IBAT3L@l
+ lis r3, CONFIG_SYS_IBAT3U@h
+ ori r3, r3, CONFIG_SYS_IBAT3U@l
+ mtspr IBAT3L, r4
+ mtspr IBAT3U, r3
+ isync
+
+ /* DBAT 3 */
+ lis r4, CONFIG_SYS_DBAT3L@h
+ ori r4, r4, CONFIG_SYS_DBAT3L@l
+ lis r3, CONFIG_SYS_DBAT3U@h
+ ori r3, r3, CONFIG_SYS_DBAT3U@l
+ mtspr DBAT3L, r4
+ mtspr DBAT3U, r3
+ isync
+
/* IBAT 5 */
lis r4, CONFIG_SYS_IBAT5L@h
ori r4, r4, CONFIG_SYS_IBAT5L@l
@@ -388,22 +397,42 @@ early_bats:
isync
/* IBAT 6 */
- lis r4, CONFIG_SYS_IBAT6L@h
- ori r4, r4, CONFIG_SYS_IBAT6L@l
- lis r3, CONFIG_SYS_IBAT6U@h
- ori r3, r3, CONFIG_SYS_IBAT6U@l
+ lis r4, CONFIG_SYS_IBAT6L_EARLY@h
+ ori r4, r4, CONFIG_SYS_IBAT6L_EARLY@l
+ lis r3, CONFIG_SYS_IBAT6U_EARLY@h
+ ori r3, r3, CONFIG_SYS_IBAT6U_EARLY@l
mtspr IBAT6L, r4
mtspr IBAT6U, r3
isync
/* DBAT 6 */
- lis r4, CONFIG_SYS_DBAT6L@h
- ori r4, r4, CONFIG_SYS_DBAT6L@l
- lis r3, CONFIG_SYS_DBAT6U@h
- ori r3, r3, CONFIG_SYS_DBAT6U@l
+ lis r4, CONFIG_SYS_DBAT6L_EARLY@h
+ ori r4, r4, CONFIG_SYS_DBAT6L_EARLY@l
+ lis r3, CONFIG_SYS_DBAT6U_EARLY@h
+ ori r3, r3, CONFIG_SYS_DBAT6U_EARLY@l
mtspr DBAT6L, r4
mtspr DBAT6U, r3
isync
+
+#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR)
+ /* IBAT 7 */
+ lis r4, CONFIG_SYS_CCSR_DEFAULT_IBATL@h
+ ori r4, r4, CONFIG_SYS_CCSR_DEFAULT_IBATL@l
+ lis r3, CONFIG_SYS_CCSR_DEFAULT_IBATU@h
+ ori r3, r3, CONFIG_SYS_CCSR_DEFAULT_IBATU@l
+ mtspr IBAT7L, r4
+ mtspr IBAT7U, r3
+ isync
+
+ /* DBAT 7 */
+ lis r4, CONFIG_SYS_CCSR_DEFAULT_DBATL@h
+ ori r4, r4, CONFIG_SYS_CCSR_DEFAULT_DBATL@l
+ lis r3, CONFIG_SYS_CCSR_DEFAULT_DBATU@h
+ ori r3, r3, CONFIG_SYS_CCSR_DEFAULT_DBATU@l
+ mtspr DBAT7L, r4
+ mtspr DBAT7U, r3
+ isync
+#endif
blr
.globl clear_tlbs
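
The new IBAT7/DBAT7 pair maps CCSR at its power-on default address so setup_ccsrbar can reach the register before relocating the window. The board config presumably supplies values along these lines (a sketch: the 0xff700000 default and the 1MB window size are assumptions for illustration; the BATU_*/BATL_* names are the ones asm/mmu.h provides and that mp.c above already uses):

	#include <asm/mmu.h>

	/* hypothetical board-config encoding of the temporary CCSR BAT */
	#define CCSR_DEFAULT			0xff700000
	#define CONFIG_SYS_CCSR_DEFAULT_DBATU	(CCSR_DEFAULT | BATU_BL_1M | \
						 BATU_VS | BATU_VP)
	#define CONFIG_SYS_CCSR_DEFAULT_DBATL	(CCSR_DEFAULT | BATL_PP_RW | \
						 BATL_CACHEINHIBIT | \
						 BATL_GUARDEDSTORAGE)
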
@@ -419,15 +448,6 @@ tlblp:
blt tlblp
blr
- .globl enable_addr_trans
-enable_addr_trans:
- /* enable address translation */
- mfmsr r5
- ori r5, r5, (MSR_IR | MSR_DR)
- mtmsr r5
- isync
- blr
-
.globl disable_addr_trans
disable_addr_trans:
/* disable address translation */
@@ -859,17 +879,20 @@ setup_ccsrbar:
lis r4, CONFIG_SYS_CCSRBAR_DEFAULT@h
ori r4, r4, CONFIG_SYS_CCSRBAR_DEFAULT@l
- lis r5, CONFIG_SYS_CCSRBAR@h
- ori r5, r5, CONFIG_SYS_CCSRBAR@l
- srwi r6,r5,12
- stw r6, 0(r4)
+ lis r5, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
+ ori r5, r5, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
+ srwi r5,r5,12
+ li r6, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
+ rlwimi r5,r6,20,8,11
+ stw r5, 0(r4) /* Store physical value of CCSR */
isync
- lis r5, 0xffff
- ori r5,r5,0xf000
+ lis r5, TEXT_BASE@h
+ ori r5,r5,TEXT_BASE@l
lwz r5, 0(r5)
isync
+ /* Use VA of CCSR to do read */
lis r3, CONFIG_SYS_CCSRBAR@h
lwz r5, CONFIG_SYS_CCSRBAR@l(r3)
isync
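
The srwi/rlwimi pair above packs the 36-bit physical base into CCSRBAR's page-number format: srwi drops the low 12 bits, and rlwimi rotates the high nibble into big-endian bits 8-11. Equivalent C, per my reading of the two instructions:

	static unsigned int ccsrbar_value(unsigned int phys_low,
					  unsigned int phys_high)
	{
		/* page number of the low 32 bits, plus the upper 4
		 * physical-address bits in the 0x00f00000 field */
		return (phys_low >> 12) | ((phys_high & 0xf) << 20);
	}
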
@@ -959,63 +982,4 @@ unlock_ram_in_cache:
#endif
#endif
-/* If this is a multi-cpu system then we need to handle the
- * 2nd cpu. The assumption is that the 2nd cpu is being
- * held in boot holdoff mode until the 1st cpu unlocks it
- * from Linux. We'll do some basic cpu init and then pass
- * it to the Linux Reset Vector.
- * Sri: Much of this initialization is not required. Linux
- * rewrites the bats, and the sprs and also enables the L1 cache.
- */
-#if (CONFIG_NUM_CPUS > 1)
-.globl secondary_cpu_setup
-secondary_cpu_setup:
- /* Do only core setup on all cores except cpu0 */
- bl invalidate_bats
- sync
- bl enable_ext_addr
-
-#ifdef CONFIG_SYS_L2
- /* init the L2 cache */
- addis r3, r0, L2_INIT@h
- ori r3, r3, L2_INIT@l
- sync
- mtspr l2cr, r3
-#ifdef CONFIG_ALTIVEC
- dssall
-#endif
- /* invalidate the L2 cache */
- bl l2cache_invalidate
- sync
-#endif
- /* enable and invalidate the data cache */
- bl dcache_enable
- sync
-
- /* enable and invalidate the instruction cache*/
- bl icache_enable
- sync
-
- /* TBEN in HID0 */
- mfspr r4, HID0
- oris r4, r4, 0x0400
- mtspr HID0, r4
- sync
- isync
-
- /* MCP|SYNCBE|ABE in HID1 */
- mfspr r4, HID1
- oris r4, r4, 0x8000
- ori r4, r4, 0x0C00
- mtspr HID1, r4
- sync
- isync
-
- lis r3, CONFIG_LINUX_RESET_VEC@h
- ori r3, r3, CONFIG_LINUX_RESET_VEC@l
- mtlr r3
- blr
-
- /* Never Returns, Running in Linux Now */
-#endif