path: root/arch/powerpc/cpu
author     Andy Fleming <afleming@freescale.com>    2013-03-25 07:33:14 +0000
committer  Andy Fleming <afleming@freescale.com>    2013-05-14 16:00:25 -0500
commit     3e4c3137d6aefaf45a87bbad701fc336f3f24a3d (patch)
tree       c54a1d50c43176f8ff2ae429e4c8777780655459 /arch/powerpc/cpu
parent     7ff8c7ce35c4468aac02f1ab4468e23384564eb0 (diff)
e6500: Move L1 enablement after L2 enablement
The L1 D-cache on e6500 is write-through. This means that it's not considered a good idea to have the L1 up and running if the L2 is disabled. We don't actually *use* the L1 until after the L2 is brought up on e6500, so go ahead and move the L1 enablement after that code is done.

Signed-off-by: Andy Fleming <afleming@freescale.com>
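For readers skimming the patch, the following C sketch restates the ordering constraint and the invalidate/poll/enable/poll pattern the assembly below uses. It is illustrative only, not U-Boot code: spr_read(), spr_write() and enable_l2_cluster() are hypothetical stand-ins for the mfspr/mtspr instructions and the memory-mapped L2CSR0 sequence in start.S, and the sync/isync barriers present in the real code are omitted. The SPRN_L1CSR0/1 registers and the L1CSR bit names are the ones that appear in the diff.

/*
 * Illustrative sketch only -- not part of the patch.
 */
#include <stdint.h>

extern uint32_t spr_read(int spr);              /* hypothetical mfspr wrapper */
extern void     spr_write(int spr, uint32_t v); /* hypothetical mtspr wrapper */
extern void     enable_l2_cluster(void);        /* stand-in for enable_l2_cluster_l2 */

/*
 * Flash-invalidate, wait for the invalidate bits to self-clear, set the
 * enable bits, then wait for the enable bit to read back set -- the
 * pattern used for both L1 caches in start.S.
 */
static void l1_enable(int spr, uint32_t inval, uint32_t enable, uint32_t poll)
{
        spr_write(spr, inval);
        while (spr_read(spr) & inval)
                ;
        spr_write(spr, enable);
        while (!(spr_read(spr) & poll))
                ;
}

static void e6500_cache_init(void)
{
        /* Bring the cluster L2 up first: the e6500 L1 D-cache is
         * write-through, so it should not run without a backing L2. */
        enable_l2_cluster();

        /* Only now enable/invalidate the L1 caches -- the ordering this
         * patch introduces. */
        l1_enable(SPRN_L1CSR1, L1CSR1_ICFI | L1CSR1_ICLFR,
                  L1CSR1_CPE | L1CSR1_ICE, L1CSR1_ICE);   /* I-cache */
        l1_enable(SPRN_L1CSR0, L1CSR0_DCFI | L1CSR0_DCLFR,
                  L1CSR0_CPE | L1CSR0_DCE, L1CSR0_DCE);   /* D-cache */
}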
Diffstat (limited to 'arch/powerpc/cpu')
-rw-r--r--  arch/powerpc/cpu/mpc85xx/start.S  94
1 file changed, 47 insertions(+), 47 deletions(-)
diff --git a/arch/powerpc/cpu/mpc85xx/start.S b/arch/powerpc/cpu/mpc85xx/start.S
index 2ce5505..5542d0a 100644
--- a/arch/powerpc/cpu/mpc85xx/start.S
+++ b/arch/powerpc/cpu/mpc85xx/start.S
@@ -173,52 +173,6 @@ l2_disabled:
mfspr r1,DBSR
mtspr DBSR,r1 /* Clear all valid bits */
- /*
- * Enable L1 Caches early
- *
- */
-
-#ifdef CONFIG_SYS_CACHE_STASHING
- /* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
- li r2,(32 + 0)
- mtspr L1CSR2,r2
-#endif
-
- /* Enable/invalidate the I-Cache */
- lis r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
- ori r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
- mtspr SPRN_L1CSR1,r2
-1:
- mfspr r3,SPRN_L1CSR1
- and. r1,r3,r2
- bne 1b
-
- lis r3,(L1CSR1_CPE|L1CSR1_ICE)@h
- ori r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
- mtspr SPRN_L1CSR1,r3
- isync
-2:
- mfspr r3,SPRN_L1CSR1
- andi. r1,r3,L1CSR1_ICE@l
- beq 2b
-
- /* Enable/invalidate the D-Cache */
- lis r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
- ori r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
- mtspr SPRN_L1CSR0,r2
-1:
- mfspr r3,SPRN_L1CSR0
- and. r1,r3,r2
- bne 1b
-
- lis r3,(L1CSR0_CPE|L1CSR0_DCE)@h
- ori r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
- mtspr SPRN_L1CSR0,r3
- isync
-2:
- mfspr r3,SPRN_L1CSR0
- andi. r1,r3,L1CSR0_DCE@l
- beq 2b
.macro create_tlb1_entry esel ts tsize epn wimg rpn perm phy_high scratch
lis \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
@@ -782,11 +736,57 @@ enable_l2_cluster_l2:
bne 1b
lis r4, L2CSR0_L2E@h
sync
- stw r4, 0(r3) /* eanble L2 */
+ stw r4, 0(r3) /* enable L2 */
delete_ccsr_l2_tlb:
delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3
#endif
+ /*
+ * Enable the L1. On e6500, this has to be done
+ * after the L2 is up.
+ */
+
+#ifdef CONFIG_SYS_CACHE_STASHING
+ /* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
+ li r2,(32 + 0)
+ mtspr L1CSR2,r2
+#endif
+
+ /* Enable/invalidate the I-Cache */
+ lis r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
+ ori r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
+ mtspr SPRN_L1CSR1,r2
+1:
+ mfspr r3,SPRN_L1CSR1
+ and. r1,r3,r2
+ bne 1b
+
+ lis r3,(L1CSR1_CPE|L1CSR1_ICE)@h
+ ori r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
+ mtspr SPRN_L1CSR1,r3
+ isync
+2:
+ mfspr r3,SPRN_L1CSR1
+ andi. r1,r3,L1CSR1_ICE@l
+ beq 2b
+
+ /* Enable/invalidate the D-Cache */
+ lis r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
+ ori r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
+ mtspr SPRN_L1CSR0,r2
+1:
+ mfspr r3,SPRN_L1CSR0
+ and. r1,r3,r2
+ bne 1b
+
+ lis r3,(L1CSR0_CPE|L1CSR0_DCE)@h
+ ori r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
+ mtspr SPRN_L1CSR0,r3
+ isync
+2:
+ mfspr r3,SPRN_L1CSR0
+ andi. r1,r3,L1CSR0_DCE@l
+ beq 2b
#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
#define DCSR_LAWBARH0 (CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_SIZE_1M 0x13