author    Catalin Marinas <catalin.marinas@arm.com>  2009-07-24 15:32:56 +0400
committer Catalin Marinas <catalin.marinas@arm.com>  2009-07-24 15:32:56 +0400
commit    347c8b70b1d5256e445e54e736f88d21877616cf
tree      513ae480cdb8eb46ec68aab77ab78c5d8b3a205b  /arch/arm/mm/cache-v7.S
parent    b86040a59feb255a8193173caa4d5199464433d5
Thumb-2: Implement the unified arch/arm/mm support
This patch adds the ARM/Thumb-2 unified support to the arch/arm/mm/* files.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
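The patch relies on the ARM() and THUMB() helpers from arch/arm/include/asm/unified.h, which keep or drop a line of assembly depending on the target instruction set. A minimal C-preprocessor sketch of the idea, assuming the usual CONFIG_THUMB2_KERNEL switch (the real header has more cases and may differ in detail):

    /*
     * Sketch of the unified-assembly helpers, modelled on
     * arch/arm/include/asm/unified.h.  Each macro either emits its
     * argument verbatim or discards it, so a single .S file can carry
     * both the ARM and the Thumb-2 instruction sequence.
     */
    #ifdef CONFIG_THUMB2_KERNEL
    #define ARM(x...)           /* ARM-only lines are dropped     */
    #define THUMB(x...)    x    /* Thumb-2 lines are assembled    */
    #else
    #define ARM(x...)      x    /* ARM lines are assembled        */
    #define THUMB(x...)         /* Thumb-2-only lines are dropped */
    #endif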
Diffstat (limited to 'arch/arm/mm/cache-v7.S')
-rw-r--r--  arch/arm/mm/cache-v7.S | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index be93ff02a98d..bda0ec31a4e2 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -21,7 +21,7 @@
*
* Flush the whole D-cache.
*
- * Corrupted registers: r0-r5, r7, r9-r11
+ * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
*
* - mm - mm_struct describing address space
*/
@@ -51,8 +51,12 @@ loop1:
loop2:
mov r9, r4 @ create working copy of max way size
loop3:
- orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
- orr r11, r11, r7, lsl r2 @ factor index number into r11
+ ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
+ THUMB( lsl r6, r9, r5 )
+ THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
+ ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
+ THUMB( lsl r6, r7, r2 )
+ THUMB( orr r11, r11, r6 ) @ factor index number into r11
mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
subs r9, r9, #1 @ decrement the way
bge loop3
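The extra THUMB() lines in this hunk exist because Thumb-2 data-processing instructions cannot shift by a register amount inside the flexible second operand, so "orr r11, r10, r9, lsl r5" has no direct Thumb-2 encoding; the shift is done first with a separate lsl into the scratch register r6, which is why the comment above now lists r6 as corrupted in Thumb mode only. As a rough C sketch of the value the loop assembles in r11 (register roles are taken from the earlier part of v7_flush_dcache_all, which is not shown in this diff; the helper name is made up for illustration):

    #include <stdint.h>

    /*
     * Illustrative helper only, not part of the patch: the operand
     * written by "mcr p15, 0, r11, c7, c14, 2" (DCCISW, clean and
     * invalidate D-cache line by set/way).  In the assembly, way is r9,
     * way_shift is r5, set is r7, line_shift is r2, and (level << 1)
     * is already held in r10.
     */
    static inline uint32_t dccisw_operand(uint32_t level, uint32_t way,
                                          uint32_t way_shift, uint32_t set,
                                          uint32_t line_shift)
    {
            return (way << way_shift) | (set << line_shift) | (level << 1);
    }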
@@ -82,11 +86,13 @@ ENDPROC(v7_flush_dcache_all)
*
*/
ENTRY(v7_flush_kern_cache_all)
- stmfd sp!, {r4-r5, r7, r9-r11, lr}
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
bl v7_flush_dcache_all
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
- ldmfd sp!, {r4-r5, r7, r9-r11, lr}
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
mov pc, lr
ENDPROC(v7_flush_kern_cache_all)
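A note on the register-list change in the last hunk: r6 is a callee-saved register under the AAPCS, so once the Thumb-2 path in v7_flush_dcache_all uses it as scratch, v7_flush_kern_cache_all has to save and restore it as well, hence the wider {r4-r7, r9-r11, lr} lists in the THUMB() variants while the ARM() lists are unchanged.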