Diffstat (limited to 'arch/arm/cpu/armv8')
-rw-r--r--  arch/arm/cpu/armv8/Makefile   |   1
-rw-r--r--  arch/arm/cpu/armv8/cache.S    |  57
-rw-r--r--  arch/arm/cpu/armv8/cache_v8.c |  25
-rw-r--r--  arch/arm/cpu/armv8/gic.S      | 106
-rw-r--r--  arch/arm/cpu/armv8/start.S    |  84
5 files changed, 105 insertions, 168 deletions
diff --git a/arch/arm/cpu/armv8/Makefile b/arch/arm/cpu/armv8/Makefile
index b6eb6de..7d93f59 100644
--- a/arch/arm/cpu/armv8/Makefile
+++ b/arch/arm/cpu/armv8/Makefile
@@ -13,5 +13,4 @@ obj-y += cache_v8.o
obj-y += exceptions.o
obj-y += cache.o
obj-y += tlb.o
-obj-y += gic.o
obj-y += transition.o
diff --git a/arch/arm/cpu/armv8/cache.S b/arch/arm/cpu/armv8/cache.S
index 546a83e..4b3ee6e 100644
--- a/arch/arm/cpu/armv8/cache.S
+++ b/arch/arm/cpu/armv8/cache.S
@@ -19,23 +19,22 @@
* clean and invalidate one level of cache.
*
* x0: cache level
- * x1~x9: clobbered
+ * x1: 0 flush & invalidate, 1 invalidate only
+ * x2~x9: clobbered
*/
ENTRY(__asm_flush_dcache_level)
- lsl x1, x0, #1
- msr csselr_el1, x1 /* select cache level */
+ lsl x12, x0, #1
+ msr csselr_el1, x12 /* select cache level */
isb /* sync change of csselr_el1 */
mrs x6, ccsidr_el1 /* read the new ccsidr_el1 */
and x2, x6, #7 /* x2 <- log2(cache line size)-4 */
add x2, x2, #4 /* x2 <- log2(cache line size) */
mov x3, #0x3ff
and x3, x3, x6, lsr #3 /* x3 <- max number of #ways */
- add w4, w3, w3
- sub w4, w4, 1 /* round up log2(#ways + 1) */
- clz w5, w4 /* bit position of #ways */
+ clz w5, w3 /* bit position of #ways */
mov x4, #0x7fff
and x4, x4, x6, lsr #13 /* x4 <- max number of #sets */
- /* x1 <- cache level << 1 */
+ /* x12 <- cache level << 1 */
/* x2 <- line length offset */
/* x3 <- number of cache ways - 1 */
/* x4 <- number of cache sets - 1 */
@@ -45,11 +44,14 @@ loop_set:
mov x6, x3 /* x6 <- working copy of #ways */
loop_way:
lsl x7, x6, x5
- orr x9, x1, x7 /* map way and level to cisw value */
+ orr x9, x12, x7 /* map way and level to cisw value */
lsl x7, x4, x2
orr x9, x9, x7 /* map set number to cisw value */
- dc cisw, x9 /* clean & invalidate by set/way */
- subs x6, x6, #1 /* decrement the way */
+ tbz w1, #0, 1f
+ dc isw, x9
+ b 2f
+1: dc cisw, x9 /* clean & invalidate by set/way */
+2: subs x6, x6, #1 /* decrement the way */
b.ge loop_way
subs x4, x4, #1 /* decrement the set */
b.ge loop_set
@@ -58,11 +60,14 @@ loop_way:
ENDPROC(__asm_flush_dcache_level)
/*
- * void __asm_flush_dcache_all(void)
+ * void __asm_flush_dcache_all(int invalidate_only)
+ *
+ * x0: 0 flush & invalidate, 1 invalidate only
*
* clean and invalidate all data cache by SET/WAY.
*/
-ENTRY(__asm_flush_dcache_all)
+ENTRY(__asm_dcache_all)
+ mov x1, x0
dsb sy
mrs x10, clidr_el1 /* read clidr_el1 */
lsr x11, x10, #24
@@ -76,13 +81,13 @@ ENTRY(__asm_flush_dcache_all)
/* x15 <- return address */
loop_level:
- lsl x1, x0, #1
- add x1, x1, x0 /* x0 <- tripled cache level */
- lsr x1, x10, x1
- and x1, x1, #7 /* x1 <- cache type */
- cmp x1, #2
+ lsl x12, x0, #1
+ add x12, x12, x0 /* x12 <- tripled cache level */
+ lsr x12, x10, x12
+ and x12, x12, #7 /* x12 <- cache type */
+ cmp x12, #2
b.lt skip /* skip if no cache or icache */
- bl __asm_flush_dcache_level
+ bl __asm_flush_dcache_level /* x1 = 0 flush, 1 invalidate */
skip:
add x0, x0, #1 /* increment cache level */
cmp x11, x0
@@ -96,8 +101,24 @@ skip:
finished:
ret
+ENDPROC(__asm_dcache_all)
+
+ENTRY(__asm_flush_dcache_all)
+ mov x16, lr
+ mov x0, #0 /* bit 0 clear: clean & invalidate */
+ bl __asm_dcache_all
+ mov lr, x16
+ ret
ENDPROC(__asm_flush_dcache_all)
+ENTRY(__asm_invalidate_dcache_all)
+ mov x16, lr
+ mov x0, #0xffff /* bit 0 set: invalidate only */
+ bl __asm_dcache_all
+ mov lr, x16
+ ret
+ENDPROC(__asm_invalidate_dcache_all)
+
/*
* void __asm_flush_dcache_range(start, end)
*
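
To make the new flag plumbing easier to follow, here is a minimal C model of the
set/way walk that __asm_dcache_all performs. This is a sketch only: read_clidr(),
read_ccsidr(), dc_isw() and dc_cisw() are hypothetical stand-ins for the MRS/DC
instructions above, not U-Boot APIs.

unsigned long read_clidr(void);              /* hypothetical: mrs clidr_el1 */
unsigned long read_ccsidr(unsigned long l);  /* hypothetical: csselr_el1 write + mrs ccsidr_el1 */
void dc_isw(unsigned long sw);               /* hypothetical: dc isw */
void dc_cisw(unsigned long sw);              /* hypothetical: dc cisw */

static void dcache_all(int invalidate_only)  /* mirrors __asm_dcache_all */
{
	unsigned long clidr = read_clidr();
	unsigned int loc = (clidr >> 24) & 7;       /* level of coherence */
	unsigned int level;

	for (level = 0; level < loc; level++) {
		if (((clidr >> (level * 3)) & 7) < 2)
			continue;                   /* no cache or icache only */
		unsigned long ccsidr = read_ccsidr(level << 1);
		unsigned int line_shift = (ccsidr & 7) + 4;  /* log2(line size) */
		unsigned int ways = (ccsidr >> 3) & 0x3ff;   /* #ways - 1 */
		unsigned int sets = (ccsidr >> 13) & 0x7fff; /* #sets - 1 */
		unsigned int way_shift = __builtin_clz(ways); /* clz w5, w3 */
		for (int set = sets; set >= 0; set--) {
			for (int way = ways; way >= 0; way--) {
				unsigned long sw = (level << 1) |
					((unsigned long)way << way_shift) |
					((unsigned long)set << line_shift);
				if (invalidate_only)
					dc_isw(sw);   /* dc isw */
				else
					dc_cisw(sw);  /* dc cisw */
			}
		}
	}
}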
diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index 131fdab..a96ecda 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -45,15 +45,31 @@ static void mmu_setup(void)
/* load TTBR0 */
el = current_el();
- if (el == 1)
+ if (el == 1) {
asm volatile("msr ttbr0_el1, %0"
: : "r" (gd->arch.tlb_addr) : "memory");
- else if (el == 2)
+ asm volatile("msr tcr_el1, %0"
+ : : "r" (TCR_FLAGS | TCR_EL1_IPS_BITS)
+ : "memory");
+ asm volatile("msr mair_el1, %0"
+ : : "r" (MEMORY_ATTRIBUTES) : "memory");
+ } else if (el == 2) {
asm volatile("msr ttbr0_el2, %0"
: : "r" (gd->arch.tlb_addr) : "memory");
- else
+ asm volatile("msr tcr_el2, %0"
+ : : "r" (TCR_FLAGS | TCR_EL2_IPS_BITS)
+ : "memory");
+ asm volatile("msr mair_el2, %0"
+ : : "r" (MEMORY_ATTRIBUTES) : "memory");
+ } else {
asm volatile("msr ttbr0_el3, %0"
: : "r" (gd->arch.tlb_addr) : "memory");
+ asm volatile("msr tcr_el3, %0"
+ : : "r" (TCR_FLAGS | TCR_EL2_IPS_BITS)
+ : "memory");
+ asm volatile("msr mair_el3, %0"
+ : : "r" (MEMORY_ATTRIBUTES) : "memory");
+ }
/* enable the mmu */
set_sctlr(get_sctlr() | CR_M);
@@ -64,7 +80,7 @@ static void mmu_setup(void)
*/
void invalidate_dcache_all(void)
{
- __asm_flush_dcache_all();
+ __asm_invalidate_dcache_all();
}
/*
@@ -161,6 +177,7 @@ int dcache_status(void)
void icache_enable(void)
{
+ __asm_invalidate_icache_all();
set_sctlr(get_sctlr() | CR_I);
}
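
icache_enable() above now invalidates before setting SCTLR.I; dcache_enable() is
expected to do the same for the d-cache and TLBs before setting SCTLR.M, which is
what lets start.S (below) drop its up-front invalidation. A minimal sketch of that
ordering, assuming the helpers above — not U-Boot's actual dcache_enable(), which
lives in common ARM code:

void invalidate_dcache_all(void);    /* wraps __asm_invalidate_dcache_all */
void __asm_invalidate_tlb_all(void);
void mmu_setup(void);                /* static in cache_v8.c; declared here for the sketch */

/* sketch: the order the enable path must preserve */
void dcache_enable_sketch(void)
{
	invalidate_dcache_all();     /* no stale d-cache lines */
	__asm_invalidate_tlb_all();  /* no stale translations when the MMU turns on */
	mmu_setup();                 /* TTBR0/TCR/MAIR for the current EL, then SCTLR.M */
}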
diff --git a/arch/arm/cpu/armv8/gic.S b/arch/arm/cpu/armv8/gic.S
deleted file mode 100644
index 599aa8f..0000000
--- a/arch/arm/cpu/armv8/gic.S
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * GIC Initialization Routines.
- *
- * (C) Copyright 2013
- * David Feng <fenghua@phytium.com.cn>
- *
- * SPDX-License-Identifier: GPL-2.0+
- */
-
-#include <asm-offsets.h>
-#include <config.h>
-#include <linux/linkage.h>
-#include <asm/macro.h>
-#include <asm/gic.h>
-
-
-/*************************************************************************
- *
- * void gic_init(void) __attribute__((weak));
- *
- * Currently, this routine only initialize secure copy of GIC
- * with Security Extensions at EL3.
- *
- *************************************************************************/
-WEAK(gic_init)
- branch_if_slave x0, 2f
-
- /* Initialize Distributor and SPIs */
- ldr x1, =GICD_BASE
- mov w0, #0x3 /* EnableGrp0 | EnableGrp1 */
- str w0, [x1, GICD_CTLR] /* Secure GICD_CTLR */
- ldr w0, [x1, GICD_TYPER]
- and w2, w0, #0x1f /* ITLinesNumber */
- cbz w2, 2f /* No SPIs */
- add x1, x1, (GICD_IGROUPRn + 4)
- mov w0, #~0 /* Config SPIs as Grp1 */
-1: str w0, [x1], #0x4
- sub w2, w2, #0x1
- cbnz w2, 1b
-
- /* Initialize SGIs and PPIs */
-2: ldr x1, =GICD_BASE
- mov w0, #~0 /* Config SGIs and PPIs as Grp1 */
- str w0, [x1, GICD_IGROUPRn] /* GICD_IGROUPR0 */
- mov w0, #0x1 /* Enable SGI 0 */
- str w0, [x1, GICD_ISENABLERn]
-
- /* Initialize Cpu Interface */
- ldr x1, =GICC_BASE
- mov w0, #0x1e7 /* Disable IRQ/FIQ Bypass & */
- /* Enable Ack Group1 Interrupt & */
- /* EnableGrp0 & EnableGrp1 */
- str w0, [x1, GICC_CTLR] /* Secure GICC_CTLR */
-
- mov w0, #0x1 << 7 /* Non-Secure access to GICC_PMR */
- str w0, [x1, GICC_PMR]
-
- ret
-ENDPROC(gic_init)
-
-
-/*************************************************************************
- *
- * void gic_send_sgi(u64 sgi) __attribute__((weak));
- *
- *************************************************************************/
-WEAK(gic_send_sgi)
- ldr x1, =GICD_BASE
- mov w2, #0x8000
- movk w2, #0x100, lsl #16
- orr w2, w2, w0
- str w2, [x1, GICD_SGIR]
- ret
-ENDPROC(gic_send_sgi)
-
-
-/*************************************************************************
- *
- * void wait_for_wakeup(void) __attribute__((weak));
- *
- * Wait for SGI 0 from master.
- *
- *************************************************************************/
-WEAK(wait_for_wakeup)
- ldr x1, =GICC_BASE
-0: wfi
- ldr w0, [x1, GICC_AIAR]
- str w0, [x1, GICC_AEOIR]
- cbnz w0, 0b
- ret
-ENDPROC(wait_for_wakeup)
-
-
-/*************************************************************************
- *
- * void smp_kick_all_cpus(void) __attribute__((weak));
- *
- *************************************************************************/
-WEAK(smp_kick_all_cpus)
- /* Kick secondary cpus up by SGI 0 interrupt */
- mov x0, xzr /* SGI 0 */
- mov x29, lr /* Save LR */
- bl gic_send_sgi
- mov lr, x29 /* Restore LR */
- ret
-ENDPROC(smp_kick_all_cpus)
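
For comparison with the shared GIC code that lowlevel_init now calls
(gic_init_secure / gic_init_secure_percpu, below), the deleted gic_init boils down
to the following C rendering — a sketch only, using U-Boot's readl()/writel() and
the GICD_*/GICC_* offsets from asm/gic.h as in the deleted file:

#include <common.h>
#include <asm/io.h>   /* readl/writel */
#include <asm/gic.h>  /* GICD_* / GICC_* register offsets */

/* C rendering of the deleted gic_init (sketch only) */
static void gic_init_sketch(void)
{
	void *gicd = (void *)GICD_BASE;
	void *gicc = (void *)GICC_BASE;
	u32 i, itlines = readl(gicd + GICD_TYPER) & 0x1f; /* ITLinesNumber */

	/* distributor (master only): enable both groups, SPIs to Group 1 */
	writel(0x3, gicd + GICD_CTLR);           /* EnableGrp0 | EnableGrp1 */
	for (i = 1; i <= itlines; i++)
		writel(~0, gicd + GICD_IGROUPRn + 4 * i);

	/* per-CPU: SGIs/PPIs to Group 1, enable SGI 0 */
	writel(~0, gicd + GICD_IGROUPRn);
	writel(0x1, gicd + GICD_ISENABLERn);

	/* CPU interface: bypass off, Ack Group 1, EnableGrp0 | EnableGrp1 */
	writel(0x1e7, gicc + GICC_CTLR);
	writel(1 << 7, gicc + GICC_PMR);         /* non-secure access to GICC_PMR */
}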
diff --git a/arch/arm/cpu/armv8/start.S b/arch/arm/cpu/armv8/start.S
index bcc2603..33d3f36 100644
--- a/arch/arm/cpu/armv8/start.S
+++ b/arch/arm/cpu/armv8/start.S
@@ -50,7 +50,10 @@ reset:
*/
adr x0, vectors
switch_el x1, 3f, 2f, 1f
3: msr vbar_el3, x0
+ mrs x0, scr_el3
+ orr x0, x0, #0xf /* SCR_EL3.NS|IRQ|FIQ|EA */
+ msr scr_el3, x0
msr cptr_el3, xzr /* Enable FP/SIMD */
ldr x0, =COUNTER_FREQUENCY
msr cntfrq_el0, x0 /* Initialize CNTFRQ */
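
The literal #0xf written to scr_el3 above packs the four low SCR_EL3 bits. Spelled
out (bit positions per the ARMv8 architecture; the symbolic names are illustrative,
not taken from a U-Boot header):

#define SCR_EL3_NS   (1 << 0)  /* EL0/EL1/EL2 are Non-secure */
#define SCR_EL3_IRQ  (1 << 1)  /* physical IRQ routed to EL3 */
#define SCR_EL3_FIQ  (1 << 2)  /* physical FIQ routed to EL3 */
#define SCR_EL3_EA   (1 << 3)  /* external abort/SError routed to EL3 */
/* 0xf == SCR_EL3_NS | SCR_EL3_IRQ | SCR_EL3_FIQ | SCR_EL3_EA */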
@@ -64,10 +67,12 @@ reset:
msr cpacr_el1, x0 /* Enable FP/SIMD */
0:
- /* Cache/BPB/TLB Invalidate */
- bl __asm_flush_dcache_all /* dCache clean&invalidate */
- bl __asm_invalidate_icache_all /* iCache invalidate */
- bl __asm_invalidate_tlb_all /* invalidate TLBs */
+ /*
+ * Cache/BPB/TLB Invalidate
+ * i-cache is invalidated before being enabled in icache_enable()
+ * tlb is invalidated before the mmu is enabled in dcache_enable()
+ * d-cache is invalidated before being enabled in dcache_enable()
+ */
/* Processor specific initialization */
bl lowlevel_init
@@ -93,63 +98,64 @@ master_cpu:
/*-----------------------------------------------------------------------*/
WEAK(lowlevel_init)
- /* Initialize GIC Secure Bank Status */
mov x29, lr /* Save LR */
- bl gic_init
- branch_if_master x0, x1, 1f
+#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
+ branch_if_slave x0, 1f
+ ldr x0, =GICD_BASE
+ bl gic_init_secure
+1:
+#if defined(CONFIG_GICV3)
+ ldr x0, =GICR_BASE
+ bl gic_init_secure_percpu
+#elif defined(CONFIG_GICV2)
+ ldr x0, =GICD_BASE
+ ldr x1, =GICC_BASE
+ bl gic_init_secure_percpu
+#endif
+#endif
+
+ branch_if_master x0, x1, 2f
/*
* Slaves should wait for the master to clear the spin table.
* This sync prevents slaves from observing an incorrect
* value in the spin table and jumping to the wrong place.
*/
- bl wait_for_wakeup
+#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
+#ifdef CONFIG_GICV2
+ ldr x0, =GICC_BASE
+#endif
+ bl gic_wait_for_interrupt
+#endif
/*
- * All processors will enter EL2 and optionally EL1.
+ * All slaves will enter EL2 and optionally EL1.
*/
bl armv8_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
bl armv8_switch_to_el1
#endif
-1:
+2:
mov lr, x29 /* Restore LR */
ret
ENDPROC(lowlevel_init)
-/*-----------------------------------------------------------------------*/
-
-ENTRY(c_runtime_cpu_setup)
- /* If I-cache is enabled invalidate it */
-#ifndef CONFIG_SYS_ICACHE_OFF
- ic iallu /* I+BTB cache invalidate */
- isb sy
+WEAK(smp_kick_all_cpus)
+ /* Kick secondary cpus up by SGI 0 interrupt */
+ mov x29, lr /* Save LR */
+#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
+ ldr x0, =GICD_BASE
+ bl gic_kick_secondary_cpus
#endif
+ mov lr, x29 /* Restore LR */
+ ret
+ENDPROC(smp_kick_all_cpus)
-#ifndef CONFIG_SYS_DCACHE_OFF
- /*
- * Setup MAIR and TCR.
- */
- ldr x0, =MEMORY_ATTRIBUTES
- ldr x1, =TCR_FLAGS
-
- switch_el x2, 3f, 2f, 1f
-3: orr x1, x1, TCR_EL3_IPS_BITS
- msr mair_el3, x0
- msr tcr_el3, x1
- b 0f
-2: orr x1, x1, TCR_EL2_IPS_BITS
- msr mair_el2, x0
- msr tcr_el2, x1
- b 0f
-1: orr x1, x1, TCR_EL1_IPS_BITS
- msr mair_el1, x0
- msr tcr_el1, x1
-0:
-#endif
+/*-----------------------------------------------------------------------*/
+ENTRY(c_runtime_cpu_setup)
/* Relocate vBAR */
adr x0, vectors
switch_el x1, 3f, 2f, 1f