Diffstat (limited to 'arch/powerpc/cpu/mpc85xx/start.S')
-rw-r--r--	arch/powerpc/cpu/mpc85xx/start.S	376
1 files changed, 332 insertions, 44 deletions
diff --git a/arch/powerpc/cpu/mpc85xx/start.S b/arch/powerpc/cpu/mpc85xx/start.S
index 6aabc30..9e04257 100644
--- a/arch/powerpc/cpu/mpc85xx/start.S
+++ b/arch/powerpc/cpu/mpc85xx/start.S
@@ -86,6 +86,35 @@ _start_e500:
li r1,MSR_DE
mtmsr r1
+#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
+ mfspr r3,SPRN_SVR
+ rlwinm r3,r3,0,0xff
+ li r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
+ cmpw r3,r4
+ beq 1f
+
+#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
+ li r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
+ cmpw r3,r4
+ beq 1f
+#endif
+
+	/* Not a revision affected by the erratum */
+ li r27,0
+ b 2f
+
+1: li r27,1 /* Remember for later that we have the erratum */
+ /* Erratum says set bits 55:60 to 001001 */
+ msync
+ isync
+ mfspr r3,976
+ li r4,0x48
+ rlwimi r3,r4,0,0x1f8
+ mtspr 976,r3
+ isync
+2:
+#endif
+
#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC)
/* ISBC uses L2 as stack.
* Disable L2 cache here so that u-boot can enable it later
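For reference, the check added above keeps only the low byte of SVR (the processor revision), compares it against the affected revision constants, and then patches bits 55:60 of SPR 976. A minimal C sketch of the same logic; the revision values here are hypothetical stand-ins for the CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV* config constants:

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical stand-ins for the board-config revision constants. */
	#define A004510_SVR_REV		0x10
	#define A004510_SVR_REV2	0x11

	/* Mirrors rlwinm r3,r3,0,0xff + cmpw: keep only the low byte of
	 * SVR (the revision) and test it against each affected revision. */
	static bool has_erratum_a004510(uint32_t svr)
	{
		uint32_t rev = svr & 0xff;

		return rev == A004510_SVR_REV || rev == A004510_SVR_REV2;
	}

	/* Mirrors rlwimi r3,r4,0,0x1f8: insert 001001 into bits 55:60
	 * (64-bit IBM numbering; mask 0x1f8 of the 32-bit SPR value). */
	static uint32_t apply_a004510_bits(uint32_t spr976)
	{
		return (spr976 & ~0x1f8u) | 0x48u;
	}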
@@ -406,12 +435,11 @@ l2_disabled:
* Search for the TLB that covers the code we're executing, and shrink it
* so that it covers only this 4K page. That will ensure that any other
* TLB we create won't interfere with it. We assume that the TLB exists,
- * which is why we don't check the Valid bit of MAS1.
+ * which is why we don't check the Valid bit of MAS1. We also assume
+ * it is in TLB1.
*
* This is necessary, for example, when booting from the on-chip ROM,
* which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
- * If we don't shrink this TLB now, then we'll accidentally delete it
- * in "purge_old_ccsr_tlb" below.
*/
bl nexti /* Find our address */
nexti: mflr r1 /* R1 = our PC */
@@ -421,11 +449,15 @@ nexti: mflr r1 /* R1 = our PC */
msync
tlbsx 0, r1 /* This must succeed */
+ mfspr r14, MAS0 /* Save ESEL for later */
+ rlwinm r14, r14, 16, 0xfff
+
/* Set the size of the TLB to 4KB */
mfspr r3, MAS1
li r2, 0xF00
andc r3, r3, r2 /* Clear the TSIZE bits */
ori r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
+ oris r3, r3, MAS1_IPROT@h
mtspr MAS1, r3
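The MAS1 edit above clears the four TSIZE bits (the 0xF00 mask), sets the 4KB size, and adds IPROT so the entry survives the invalidations that follow. Sketched in C, assuming the e500 MAS1 layout implied by that mask:

	#include <stdint.h>

	#define MAS1_IPROT	0x40000000u	/* entry survives tlbivax */
	#define MAS1_TSIZE_MASK	0x00000f00u	/* matches the 0xF00 mask above */
	#define BOOKE_PAGESZ_4K	1		/* TSIZE encoding for 4KB */

	/* Equivalent of the andc/ori/oris sequence: clear TSIZE, set 4KB,
	 * and mark the entry protected so later invalidations skip it. */
	static uint32_t shrink_to_4k_protected(uint32_t mas1)
	{
		mas1 &= ~MAS1_TSIZE_MASK;
		mas1 |= (uint32_t)BOOKE_PAGESZ_4K << 8;
		mas1 |= MAS1_IPROT;
		return mas1;
	}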
/*
@@ -440,6 +472,14 @@ nexti: mflr r1 /* R1 = our PC */
mfspr r2, MAS2
andc r2, r2, r3
or r2, r2, r1
+#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
+ cmpwi r27,0
+ beq 1f
+ andi. r15, r2, MAS2_I|MAS2_G /* save the old I/G for later */
+ rlwinm r2, r2, 0, ~MAS2_I
+ ori r2, r2, MAS2_G
+1:
+#endif
mtspr MAS2, r2 /* Set the EPN to our PC base address */
mfspr r2, MAS3
@@ -452,6 +492,39 @@ nexti: mflr r1 /* R1 = our PC */
tlbwe
/*
+ * Clear out any other TLB entries that may exist, to avoid conflicts.
+	 * Our TLB entry's index (ESEL) is in r14.
+ */
+ li r0, TLBIVAX_ALL | TLBIVAX_TLB0
+ tlbivax 0, r0
+ tlbsync
+
+ mfspr r4, SPRN_TLB1CFG
+ rlwinm r4, r4, 0, TLBnCFG_NENTRY_MASK
+
+ li r3, 0
+ mtspr MAS1, r3
+1: cmpw r3, r14
+#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(CONFIG_NAND_SPL)
+ cmpwi cr1, r3, CONFIG_SYS_PPC_E500_DEBUG_TLB
+ cror cr0*4+eq, cr0*4+eq, cr1*4+eq
+#endif
+ rlwinm r5, r3, 16, MAS0_ESEL_MSK
+ addi r3, r3, 1
+ beq 2f /* skip the entry we're executing from */
+
+ oris r5, r5, MAS0_TLBSEL(1)@h
+ mtspr MAS0, r5
+
+ isync
+ tlbwe
+ isync
+ msync
+
+2: cmpw r3, r4
+ blt 1b
+
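In outline, this loop walks every TLB1 entry reported by TLB1CFG[NENTRY] and invalidates all but the entry we execute from (and, under CONFIG_SYS_PPC_E500_DEBUG_TLB, the debugger's entry). A rough C rendering, with the two helpers as hypothetical stand-ins for the mfspr/mtspr/tlbwe sequences:

	#include <stdint.h>

	/* Hypothetical stand-ins for the mfspr/mtspr + tlbwe sequences. */
	extern uint32_t read_tlb1cfg_nentry(void);	/* TLB1CFG & TLBnCFG_NENTRY_MASK */
	extern void invalidate_tlb1_entry(uint32_t esel);	/* MAS0 = ESEL, MAS1 = 0, tlbwe */

	/* Invalidate every TLB1 entry except the one covering this code
	 * (the debug TLB entry would be skipped here too, if configured). */
	static void purge_tlb1_except(uint32_t our_esel)
	{
		uint32_t nentries = read_tlb1cfg_nentry();

		for (uint32_t esel = 0; esel < nentries; esel++) {
			if (esel == our_esel)
				continue;	/* keep the mapping we run from */
			invalidate_tlb1_entry(esel);
		}
	}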
+/*
* Relocate CCSR, if necessary. We relocate CCSR if (obviously) the default
* location is not where we want it. This typically happens on a 36-bit
* system, where we want to move CCSR to near the top of 36-bit address space.
@@ -469,41 +542,15 @@ nexti: mflr r1 /* R1 = our PC */
#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW) must be defined."
#endif
-purge_old_ccsr_tlb:
- lis r8, CONFIG_SYS_CCSRBAR@h
- ori r8, r8, CONFIG_SYS_CCSRBAR@l
- lis r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
- ori r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
-
- /*
- * In a multi-stage boot (e.g. NAND boot), a previous stage may have
- * created a TLB for CCSR, which will interfere with our relocation
- * code. Since we're going to create a new TLB for CCSR anyway,
- * it should be safe to delete this old TLB here. We have to search
- * for it, though.
- */
-
- li r1, 0
- mtspr MAS6, r1 /* Search the current address space and PID */
- isync
- msync
- tlbsx 0, r8
- mfspr r1, MAS1
- andis. r2, r1, MAS1_VALID@h /* Check for the Valid bit */
- beq 1f /* Skip if no TLB found */
-
- rlwinm r1, r1, 0, 1, 31 /* Clear Valid bit */
- mtspr MAS1, r1
- isync
- msync
- tlbwe
-1:
-
create_ccsr_new_tlb:
/*
* Create a TLB for the new location of CCSR. Register R8 is reserved
* for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
*/
+ lis r8, CONFIG_SYS_CCSRBAR@h
+ ori r8, r8, CONFIG_SYS_CCSRBAR@l
+ lis r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
+ ori r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
lis r0, FSL_BOOKE_MAS0(0, 0, 0)@h
ori r0, r0, FSL_BOOKE_MAS0(0, 0, 0)@l
lis r1, FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@h
@@ -719,6 +766,253 @@ delete_temp_tlbs:
tlbwe
#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */
+#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
+#define DCSR_LAWBARH0 (CONFIG_SYS_CCSRBAR + 0x1000)
+#define LAW_SIZE_1M 0x13
+#define DCSRBAR_LAWAR (LAW_EN | (0x1d << 20) | LAW_SIZE_1M)
+
+ cmpwi r27,0
+ beq 9f
+
+ /*
+ * Create a TLB entry for CCSR
+ *
+	 * We're executing out of the TLB1 entry whose index is in r14,
+	 * and that's the only TLB entry that exists. To allocate some
+	 * TLB entries for our own use, flip a bit of that index high
+	 * enough that incrementing will never flip it back.
+ */
+
+ xori r8, r14, 32
+ lis r0, MAS0_TLBSEL(1)@h
+ rlwimi r0, r8, 16, MAS0_ESEL_MSK
+ lis r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
+ ori r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
+ lis r7, CONFIG_SYS_CCSRBAR@h
+ ori r7, r7, CONFIG_SYS_CCSRBAR@l
+ ori r2, r7, MAS2_I|MAS2_G
+ lis r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
+ ori r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
+ lis r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
+ ori r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
+ mtspr MAS0, r0
+ mtspr MAS1, r1
+ mtspr MAS2, r2
+ mtspr MAS3, r3
+ mtspr MAS7, r4
+ isync
+ tlbwe
+ isync
+ msync
+
+ /* Map DCSR temporarily to physical address zero */
+ li r0, 0
+ lis r3, DCSRBAR_LAWAR@h
+ ori r3, r3, DCSRBAR_LAWAR@l
+
+ stw r0, 0xc00(r7) /* LAWBARH0 */
+ stw r0, 0xc04(r7) /* LAWBARL0 */
+ sync
+ stw r3, 0xc08(r7) /* LAWAR0 */
+
+ /* Read back from LAWAR to ensure the update is complete. */
+ lwz r3, 0xc08(r7) /* LAWAR0 */
+ isync
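The ordering above matters: the LAW base registers are written first, a sync orders them before the attribute register that enables the window, and a readback of LAWAR guarantees the update has completed before the window is used. In outline, with hypothetical MMIO accessors:

	#include <stdint.h>

	/* Hypothetical 32-bit MMIO accessors for the CCSR window. */
	extern void ccsr_write32(uint32_t off, uint32_t val);
	extern uint32_t ccsr_read32(uint32_t off);
	extern void mem_sync(void);	/* stands in for the sync instruction */

	#define LAWBARH0	0xc00
	#define LAWBARL0	0xc04
	#define LAWAR0		0xc08

	/* Map DCSR at physical address zero through LAW 0, then read
	 * LAWAR back so the update completes before the window is used. */
	static void map_dcsr_at_zero(uint32_t lawar_value)
	{
		ccsr_write32(LAWBARH0, 0);
		ccsr_write32(LAWBARL0, 0);
		mem_sync();			/* order base writes before enable */
		ccsr_write32(LAWAR0, lawar_value);
		(void)ccsr_read32(LAWAR0);	/* readback ensures completion */
	}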
+
+ /* Create a TLB entry for DCSR at zero */
+
+ addi r9, r8, 1
+ lis r0, MAS0_TLBSEL(1)@h
+ rlwimi r0, r9, 16, MAS0_ESEL_MSK
+ lis r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
+ ori r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
+ li r6, 0 /* DCSR effective address */
+ ori r2, r6, MAS2_I|MAS2_G
+ li r3, MAS3_SW|MAS3_SR
+ li r4, 0
+ mtspr MAS0, r0
+ mtspr MAS1, r1
+ mtspr MAS2, r2
+ mtspr MAS3, r3
+ mtspr MAS7, r4
+ isync
+ tlbwe
+ isync
+ msync
+
+	/* Enable the timebase; the delay loops in erratum_set_value depend on it */
+#define CTBENR 0xe2084
+ li r3, 1
+ addis r4, r7, CTBENR@ha
+ stw r3, CTBENR@l(r4)
+ lwz r3, CTBENR@l(r4)
+	twi	0,r3,0		/* never traps; forces the lwz above to complete */
+ isync
+
+ .macro erratum_set_ccsr offset value
+ addis r3, r7, \offset@ha
+ lis r4, \value@h
+ addi r3, r3, \offset@l
+ ori r4, r4, \value@l
+ bl erratum_set_value
+ .endm
+
+ .macro erratum_set_dcsr offset value
+ addis r3, r6, \offset@ha
+ lis r4, \value@h
+ addi r3, r3, \offset@l
+ ori r4, r4, \value@l
+ bl erratum_set_value
+ .endm
+
+ erratum_set_dcsr 0xb0e08 0xe0201800
+ erratum_set_dcsr 0xb0e18 0xe0201800
+ erratum_set_dcsr 0xb0e38 0xe0400000
+ erratum_set_dcsr 0xb0008 0x00900000
+ erratum_set_dcsr 0xb0e40 0xe00a0000
+ erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
+ erratum_set_ccsr 0x10f00 0x415e5000
+ erratum_set_ccsr 0x11f00 0x415e5000
+
+ /* Make temp mapping uncacheable again, if it was initially */
+ bl 2f
+2: mflr r3
+ tlbsx 0, r3
+ mfspr r4, MAS2
+ rlwimi r4, r15, 0, MAS2_I
+ rlwimi r4, r15, 0, MAS2_G
+ mtspr MAS2, r4
+ isync
+ tlbwe
+ isync
+ msync
+
+	/* Flash-invalidate the I-cache and flash-clear its lock bits */
+ lis r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
+ ori r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
+ sync
+ isync
+ mtspr SPRN_L1CSR1,r3
+ isync
+2: sync
+ mfspr r4,SPRN_L1CSR1
+ and. r4,r4,r3
+ bne 2b
+
+ lis r3,(L1CSR1_CPE|L1CSR1_ICE)@h
+ ori r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
+ sync
+ isync
+ mtspr SPRN_L1CSR1,r3
+ isync
+2: sync
+ mfspr r4,SPRN_L1CSR1
+ and. r4,r4,r3
+ beq 2b
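Both spin loops follow the same L1CSR1 pattern: write command or enable bits, then poll until the self-clearing command bits drop (first loop) or the enable bits read back as set (second loop). A generic sketch with hypothetical SPR accessors:

	#include <stdint.h>

	extern uint32_t read_l1csr1(void);	/* hypothetical mfspr SPRN_L1CSR1 */
	extern void write_l1csr1(uint32_t val);	/* hypothetical mtspr SPRN_L1CSR1 */

	/* Write self-clearing command bits (e.g. ICFI|ICLFR) and wait for
	 * the hardware to clear them, mirroring the first and./bne loop. */
	static void l1csr1_command(uint32_t bits)
	{
		write_l1csr1(bits);
		while (read_l1csr1() & bits)
			;
	}

	/* Write enable bits (e.g. CPE|ICE) and wait until any of them
	 * reads back as set, mirroring the second and./beq loop. */
	static void l1csr1_enable(uint32_t bits)
	{
		write_l1csr1(bits);
		while ((read_l1csr1() & bits) == 0)
			;
	}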
+
+ /* Remove temporary mappings */
+ lis r0, MAS0_TLBSEL(1)@h
+ rlwimi r0, r9, 16, MAS0_ESEL_MSK
+ li r3, 0
+ mtspr MAS0, r0
+ mtspr MAS1, r3
+ isync
+ tlbwe
+ isync
+ msync
+
+ li r3, 0
+ stw r3, 0xc08(r7) /* LAWAR0 */
+ lwz r3, 0xc08(r7)
+ isync
+
+ lis r0, MAS0_TLBSEL(1)@h
+ rlwimi r0, r8, 16, MAS0_ESEL_MSK
+ li r3, 0
+ mtspr MAS0, r0
+ mtspr MAS1, r3
+ isync
+ tlbwe
+ isync
+ msync
+
+ b 9f
+
+ /* r3 = addr, r4 = value, clobbers r5, r11, r12 */
+erratum_set_value:
+ /* Lock two cache lines into I-Cache */
+ sync
+ mfspr r11, SPRN_L1CSR1
+ rlwinm r11, r11, 0, ~L1CSR1_ICUL
+ sync
+ isync
+ mtspr SPRN_L1CSR1, r11
+ isync
+
+ mflr r12
+ bl 5f
+5: mflr r5
+ addi r5, r5, 2f - 5b
+ icbtls 0, 0, r5
+ addi r5, r5, 64
+
+ sync
+ mfspr r11, SPRN_L1CSR1
+3: andi. r11, r11, L1CSR1_ICUL
+ bne 3b
+
+ icbtls 0, 0, r5
+ addi r5, r5, 64
+
+ sync
+ mfspr r11, SPRN_L1CSR1
+3: andi. r11, r11, L1CSR1_ICUL
+ bne 3b
+
+ b 2f
+	.align	6	/* place the locked region on a 64-byte cache-line boundary */
+ /* Inside a locked cacheline, wait a while, write, then wait a while */
+2: sync
+
+ mfspr r5, SPRN_TBRL
+ addis r11, r5, 0x10000@h /* wait 65536 timebase ticks */
+4: mfspr r5, SPRN_TBRL
+ subf. r5, r5, r11
+ bgt 4b
+
+ stw r4, 0(r3)
+
+ mfspr r5, SPRN_TBRL
+ addis r11, r5, 0x10000@h /* wait 65536 timebase ticks */
+4: mfspr r5, SPRN_TBRL
+ subf. r5, r5, r11
+ bgt 4b
+
+ sync
+
+ /*
+ * Fill out the rest of this cache line and the next with nops,
+ * to ensure that nothing outside the locked area will be
+ * fetched due to a branch.
+ */
+ .rept 19
+ nop
+ .endr
+
+ sync
+ mfspr r11, SPRN_L1CSR1
+ rlwinm r11, r11, 0, ~L1CSR1_ICUL
+ sync
+ isync
+ mtspr SPRN_L1CSR1, r11
+ isync
+
+ mtlr r12
+ blr
+
+9:
+#endif
+
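Inside erratum_set_value, the store is bracketed by waits of 0x10000 timebase ticks, executed entirely from the two locked I-cache lines (the icbtls locking itself has no C equivalent). The wait, sketched in C with a hypothetical timebase accessor:

	#include <stdint.h>

	extern uint32_t read_tbl(void);	/* hypothetical mfspr SPRN_TBRL wrapper */

	/* Spin for roughly 0x10000 timebase ticks, as done on either side
	 * of the store inside the locked cache lines. */
	static void timebase_delay(void)
	{
		uint32_t deadline = read_tbl() + 0x10000;

		/* Signed test of the difference mirrors subf./bgt and
		 * copes with timebase wraparound. */
		while ((int32_t)(deadline - read_tbl()) > 0)
			;
	}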
create_init_ram_area:
lis r6,FSL_BOOKE_MAS0(1, 15, 0)@h
ori r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l
@@ -855,18 +1149,12 @@ version_string:
.globl _start_cont
_start_cont:
/* Set up the stack in initial RAM; it could be L2-as-SRAM or L1 dcache */
- lis r1,CONFIG_SYS_INIT_RAM_ADDR@h
- ori r1,r1,CONFIG_SYS_INIT_SP_OFFSET@l
-
+ lis r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
+ ori r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l /* Align to 16 */
li r0,0
- stwu r0,-4(r1)
- stwu r0,-4(r1) /* Terminate call chain */
-
- stwu r1,-8(r1) /* Save back chain and move SP */
- lis r0,RESET_VECTOR@h /* Address of reset vector */
- ori r0,r0,RESET_VECTOR@l
- stwu r1,-8(r1) /* Save back chain and move SP */
- stw r0,+12(r1) /* Save return addr (underflow vect) */
+ stw r0,0(r3) /* Terminate Back Chain */
+ stw r0,+4(r3) /* NULL return address. */
+ mr r1,r3 /* Transfer to SP(r1) */
GET_GOT
bl cpu_init_early_f
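The new sequence above builds a single 16-byte-aligned initial frame whose back chain and saved-LR slot are both NULL, rather than chaining stwu stores. The address math and frame layout, sketched in C with hypothetical stand-ins for the config constants (assuming CONFIG_SYS_INIT_RAM_ADDR is 64KB-aligned, so the @h/@l split behaves like simple addition):

	#include <stdint.h>

	/* Hypothetical stand-ins for the board-config constants. */
	#define INIT_RAM_ADDR	0xffd00000u
	#define INIT_SP_OFFSET	0x4000u

	/* Drop 16 bytes below the configured offset, round down to a
	 * 16-byte boundary, and terminate the call chain with a NULL back
	 * chain and a NULL saved-LR word, as the assembly does. */
	static uint32_t *initial_stack(void)
	{
		uint32_t sp = INIT_RAM_ADDR + ((INIT_SP_OFFSET - 16) & ~0xfu);
		uint32_t *frame = (uint32_t *)(uintptr_t)sp;

		frame[0] = 0;	/* back chain: end of backtrace */
		frame[1] = 0;	/* saved LR slot: no return address */
		return frame;
	}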