path: root/arch/arm/cpu/armv8/cache.S
author		Alexander Graf <agraf@suse.de>		2016-03-04 01:09:47 +0100
committer	Tom Rini <trini@konsulko.com>		2016-03-15 15:13:01 -0400
commit	5e2ec773bb6c5acf22d8652112856e87cff86ea4 (patch)
tree	ae75a1cab47d1d59c91ccc5a5d2579c64dab47a1 /arch/arm/cpu/armv8/cache.S
parent	9bb367a590feac21d674e4d2cee77702d4774819 (diff)
arm64: Make full va map code more dynamic
The idea to generate our page tables from an array of memory ranges is very sound. However, instead of hard-coding the code to create up to 2 levels of 64k granule page tables, we really should just create normal 4k page tables that allow us to set caching attributes at the 2M or 4k level later on.

So this patch moves the full_va mapping code to 4k page size and makes it fully flexible to dynamically create as many levels as necessary for a map (including dynamic 1G/2M pages). It also adds support to dynamically split a large map into smaller ones when some code wants to set dcache attributes.

With all this in place, there is very little reason to create your own page tables in board specific files.

Signed-off-by: Alexander Graf <agraf@suse.de>
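To make the "array of memory ranges" idea concrete, a minimal sketch of how a board might describe its memory for the dynamic page table code is shown below. The struct, field and macro names, as well as the addresses, are illustrative assumptions and may not match the exact U-Boot headers at this commit:

/*
 * Illustrative sketch only: a board-level memory map from which the
 * arm64 MMU code can build 4k-granule page tables and pick 1G/2M block
 * mappings dynamically. Struct, field and macro names are assumptions.
 */
#include <asm/armv8/mmu.h>

static struct mm_region example_mem_map[] = {
	{
		/* DRAM: normal, cacheable memory */
		.base = 0x80000000UL,
		.size = 0x80000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
			 PTE_BLOCK_INNER_SHARE,
	}, {
		/* MMIO: device memory, non-cacheable, execute-never */
		.base = 0x00000000UL,
		.size = 0x40000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN,
	}, {
		/* List terminator */
		0,
	}
};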
Diffstat (limited to 'arch/arm/cpu/armv8/cache.S')
-rw-r--r--	arch/arm/cpu/armv8/cache.S	54
1 file changed, 54 insertions(+), 0 deletions(-)
diff --git a/arch/arm/cpu/armv8/cache.S b/arch/arm/cpu/armv8/cache.S
index ab8c089..a9f4fec 100644
--- a/arch/arm/cpu/armv8/cache.S
+++ b/arch/arm/cpu/armv8/cache.S
@@ -10,6 +10,7 @@
#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
+#include <asm/system.h>
#include <linux/linkage.h>
/*
@@ -160,3 +161,56 @@ ENTRY(__asm_flush_l3_cache)
ret
ENDPROC(__asm_flush_l3_cache)
.weak __asm_flush_l3_cache
+
+/*
+ * void __asm_switch_ttbr(ulong new_ttbr)
+ *
+ * Safely switches to a new page table.
+ */
+ENTRY(__asm_switch_ttbr)
+ /* x2 = SCTLR (alive throughout the function) */
+ switch_el x4, 3f, 2f, 1f
+3: mrs x2, sctlr_el3
+ b 0f
+2: mrs x2, sctlr_el2
+ b 0f
+1: mrs x2, sctlr_el1
+0:
+
+ /* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
+ movn x1, #(CR_M | CR_C | CR_I)
+ and x1, x2, x1
+ switch_el x4, 3f, 2f, 1f
+3: msr sctlr_el3, x1
+ b 0f
+2: msr sctlr_el2, x1
+ b 0f
+1: msr sctlr_el1, x1
+0: isb
+
+ /* This call only clobbers x30 (lr) and x9 (unused) */
+ mov x3, x30
+ bl __asm_invalidate_tlb_all
+
+ /* From here on we're running safely with caches disabled */
+
+ /* Set TTBR to our first argument */
+ switch_el x4, 3f, 2f, 1f
+3: msr ttbr0_el3, x0
+ b 0f
+2: msr ttbr0_el2, x0
+ b 0f
+1: msr ttbr0_el1, x0
+0: isb
+
+ /* Restore original SCTLR and thus enable caches again */
+ switch_el x4, 3f, 2f, 1f
+3: msr sctlr_el3, x2
+ b 0f
+2: msr sctlr_el2, x2
+ b 0f
+1: msr sctlr_el1, x2
+0: isb
+
+ ret x3
+ENDPROC(__asm_switch_ttbr)
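
For completeness, a hedged sketch of how C code that has just written a new set of page tables might use __asm_switch_ttbr. The prototype follows the comment in the routine above; the helper and variable names are illustrative assumptions:

/*
 * Illustrative sketch only: switching to freshly written page tables.
 * __asm_switch_ttbr (cache.S above) disables the MMU and caches,
 * invalidates the TLBs, writes TTBR0_ELx and restores the original
 * SCTLR, so the switch is safe while running from the old mapping.
 */
void __asm_switch_ttbr(unsigned long new_ttbr);	/* from cache.S */

static void example_activate_page_tables(unsigned long new_pgtable_phys)
{
	/* new_pgtable_phys is the base address of the new top-level table */
	__asm_switch_ttbr(new_pgtable_phys);
}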