From df120142f36b6ff8b12187b8860269763b2b3203 Mon Sep 17 00:00:00 2001
From: Hans de Goede
Date: Sat, 9 Apr 2016 13:53:49 +0200
Subject: arm: Replace v7_maint_dcache_all(ARMV7_DCACHE_INVAL_ALL) with asm code

Let's be consistent and also replace v7_maint_dcache_all() with asm code
for the invalidate case.

Signed-off-by: Hans de Goede
---
 arch/arm/cpu/armv7/cache_v7_asm.S | 70 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)
(limited to 'arch/arm/cpu/armv7/cache_v7_asm.S')

diff --git a/arch/arm/cpu/armv7/cache_v7_asm.S b/arch/arm/cpu/armv7/cache_v7_asm.S
index 2e4629f..a433628 100644
--- a/arch/arm/cpu/armv7/cache_v7_asm.S
+++ b/arch/arm/cpu/armv7/cache_v7_asm.S
@@ -82,3 +82,73 @@ ENTRY(v7_flush_dcache_all)
 	THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
 	bx	lr
 ENDPROC(v7_flush_dcache_all)
+
+/*
+ * v7_invalidate_dcache_all()
+ *
+ * Invalidate the whole D-cache.
+ *
+ * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
+ *
+ * Note: copied from __v7_flush_dcache_all above with
+ *	mcr	p15, 0, r11, c7, c14, 2
+ * Replaced with:
+ *	mcr	p15, 0, r11, c7, c6, 2
+ */
+ENTRY(__v7_invalidate_dcache_all)
+	dmb					@ ensure ordering with previous memory accesses
+	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
+	mov	r3, r0, lsr #23			@ move LoC into position
+	ands	r3, r3, #7 << 1			@ extract LoC*2 from clidr
+	beq	inval_finished			@ if loc is 0, then no need to clean
+	mov	r10, #0				@ start clean at cache level 0
+inval_levels:
+	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
+	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
+	and	r1, r1, #7			@ mask off the bits for current cache only
+	cmp	r1, #2				@ see what cache we have at this level
+	blt	inval_skip			@ skip if no cache, or just i-cache
+	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
+	isb					@ isb to sync the new cssr & csidr
+	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
+	and	r2, r1, #7			@ extract the length of the cache lines
+	add	r2, r2, #4			@ add 4 (line length offset)
+	movw	r4, #0x3ff
+	ands	r4, r4, r1, lsr #3		@ find maximum number of the way size
+	clz	r5, r4				@ find bit position of way size increment
+	movw	r7, #0x7fff
+	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
+inval_loop1:
+	mov	r9, r7				@ create working copy of max index
+inval_loop2:
+ ARM(	orr	r11, r10, r4, lsl r5	)	@ factor way and cache number into r11
+ THUMB(	lsl	r6, r4, r5		)
+ THUMB(	orr	r11, r10, r6	)		@ factor way and cache number into r11
+ ARM(	orr	r11, r11, r9, lsl r2	)	@ factor index number into r11
+ THUMB(	lsl	r6, r9, r2		)
+ THUMB(	orr	r11, r11, r6	)		@ factor index number into r11
+	mcr	p15, 0, r11, c7, c6, 2		@ invalidate by set/way
+	subs	r9, r9, #1			@ decrement the index
+	bge	inval_loop2
+	subs	r4, r4, #1			@ decrement the way
+	bge	inval_loop1
+inval_skip:
+	add	r10, r10, #2			@ increment cache number
+	cmp	r3, r10
+	bgt	inval_levels
+inval_finished:
+	mov	r10, #0				@ switch back to cache level 0
+	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
+	dsb	st
+	isb
+	bx	lr
+ENDPROC(__v7_invalidate_dcache_all)
+
+ENTRY(v7_invalidate_dcache_all)
+ ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
+	bl	__v7_invalidate_dcache_all
+ ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
+	bx	lr
+ENDPROC(v7_invalidate_dcache_all)
--
cgit v1.1
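
For readers following the set/way walk above, the same algorithm can be sketched in C roughly as below. This is only an illustration, not U-Boot code: the function and helper names (invalidate_dcache_all_c(), read_clidr(), read_ccsidr(), write_csselr()) are hypothetical, and it assumes a bare-metal ARMv7 build (e.g. arm-none-eabi-gcc) running in a privileged mode.

/*
 * Hypothetical C rendering of the set/way invalidate walk above.
 * Not part of U-Boot; bare-metal ARMv7, privileged mode assumed.
 */
static inline unsigned int read_clidr(void)
{
	unsigned int v;

	asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r"(v));
	return v;
}

static inline unsigned int read_ccsidr(void)
{
	unsigned int v;

	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r"(v));
	return v;
}

static inline void write_csselr(unsigned int v)
{
	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r"(v));
	asm volatile("isb");	/* sync the new cache selection */
}

void invalidate_dcache_all_c(void)	/* hypothetical name */
{
	unsigned int clidr, loc, level;

	asm volatile("dmb" : : : "memory");
	clidr = read_clidr();
	loc = (clidr >> 24) & 7;		/* Level of Coherency */

	for (level = 0; level < loc; level++) {
		unsigned int ctype = (clidr >> (level * 3)) & 7;
		unsigned int ccsidr, line_shift, way_shift, max_way, max_set;
		int way, set;

		if (ctype < 2)			/* no cache, or I-cache only */
			continue;

		write_csselr(level << 1);	/* select this D/unified cache */
		ccsidr = read_ccsidr();
		line_shift = (ccsidr & 7) + 4;		/* log2(line bytes) */
		max_way = (ccsidr >> 3) & 0x3ff;	/* ways - 1 */
		/* way index lives in the top bits; clz(0) is UB in C, guard it */
		way_shift = max_way ? __builtin_clz(max_way) : 0;
		max_set = (ccsidr >> 13) & 0x7fff;	/* sets - 1 */

		for (way = max_way; way >= 0; way--) {
			for (set = max_set; set >= 0; set--) {
				unsigned int sw = (level << 1) |
					((unsigned int)way << way_shift) |
					((unsigned int)set << line_shift);
				/* DCISW: invalidate d-cache line by set/way */
				asm volatile("mcr p15, 0, %0, c7, c6, 2"
					     : : "r"(sw) : "memory");
			}
		}
	}

	write_csselr(0);			/* back to cache level 0 */
	asm volatile("dsb st" : : : "memory");
	asm volatile("isb");
}

The only difference from the flush walk is the final coprocessor write: DCISW (c7, c6, 2, invalidate only) instead of DCCISW (c7, c14, 2, clean and invalidate), exactly as the patch's header comment notes.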