author		Nicolas Pitre <nicolas.pitre@linaro.org>	2018-11-07 19:49:00 +0300
committer	Russell King <rmk+kernel@armlinux.org.uk>	2018-11-08 13:57:09 +0300
commit		b99afae1390140f5b0039e6b37a7380de31ae874 (patch)
tree		eec57bfb462e01483c0dc6a7848e24b8ac6334c7 /arch/arm/mm/copypage-v4mc.c
parent		31d0b9f9982f8e3a489e83419461d35ab003160a (diff)
ARM: 8805/2: remove unneeded naked function usage
The naked attribute is known to confuse some old gcc versions when function arguments aren't explicitly listed as inline assembly operands despite the gcc documentation. That resulted in commit 9a40ac86152c ("ARM: 6164/1: Add kto and kfrom to input operands list.").

Yet that commit has problems of its own: it got the assembly operand constraints completely wrong. If the generated code has been OK since then, it is due to luck rather than correctness. So this patch also provides proper assembly operand constraints, and removes two instances of redundant register usage in the implementation while at it.

Inspection of the generated code with this patch doesn't show any obvious quality degradation either, so not relying on __naked at all will make the code less fragile, and avoid some issues with clang.

The only remaining __naked instances (excluding the kprobes test cases) are exynos_pm_power_up_setup(), tc2_pm_power_up_setup() and cci_enable_port_for_self(). But in the first two cases, only the function address is used by the compiler with no chance of inlining it by mistake, and the third case is called from assembly code only. And the fact that no stack is available when the corresponding code is executed does warrant the __naked usage in those cases.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Reviewed-by: Stefan Agner <stefan@agner.ch>
Tested-by: Stefan Agner <stefan@agner.ch>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
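To illustrate the failure mode described above, here is a hypothetical sketch of the fragile pattern being removed (illustrative only; fragile_copy is not kernel code). The asm body assumes the arguments are still live in r0/r1, the AAPCS argument registers, but nothing in the operand lists tells the compiler so, and __naked suppresses the usual prologue and epilogue:

static void __naked fragile_copy(void *from, void *to)
{
	/* 'from' and 'to' are never named as asm operands: the code
	 * simply hopes they are still in r0/r1 per the calling
	 * convention.  A gcc that inlines or reorders around this
	 * can silently break that assumption. */
	asm volatile(
	"ldr	r2, [r0]	\n"	/* hopes 'from' is still in r0 */
	"str	r2, [r1]	\n"	/* hopes 'to' is still in r1   */
	"mov	pc, lr		\n");	/* manual return: no compiler epilogue */
}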
Diffstat (limited to 'arch/arm/mm/copypage-v4mc.c')
-rw-r--r--	arch/arm/mm/copypage-v4mc.c	19
1 file changed, 9 insertions(+), 10 deletions(-)
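The hunks below replace the hand-managed r4 loop counter with a compiler-assigned register via extended asm constraints: "+&r" declares a read-write, early-clobber register operand, "=&r" an early-clobber output, and the matching constraint "2" seeds output operand 2 (tmp) with an initial value. A minimal standalone sketch in the same style (copy_words and its block count of 64 are illustrative assumptions, not kernel code):

static void copy_words(void *to, const void *from)
{
	int tmp;

	asm volatile("\
1:	ldmia	%0!, {r2, r3}		@ load two words, advance 'from'\n\
	stmia	%1!, {r2, r3}		@ store them, advance 'to'\n\
	subs	%2, %2, #1		@ count down one block\n\
	bne	1b			@ loop while blocks remain"
	: "+&r" (from), "+&r" (to), "=&r" (tmp)
	: "2" (64)
	: "r2", "r3", "cc", "memory");
}

Without the & early-clobber markers, the compiler could legally assign an input the same register as an operand the asm writes before the inputs' last use, which is the class of constraint bug the message above attributes to commit 9a40ac86152c.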
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 0224416cba3c..b03202cddddb 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -40,12 +40,11 @@ static DEFINE_RAW_SPINLOCK(minicache_lock);
* instruction. If your processor does not supply this, you have to write your
* own copy_user_highpage that does the right thing.
*/
-static void __naked
-mc_copy_user_page(void *from, void *to)
+static void mc_copy_user_page(void *from, void *to)
{
- asm volatile(
- "stmfd sp!, {r4, lr} @ 2\n\
- mov r4, %2 @ 1\n\
+ int tmp;
+
+ asm volatile ("\
ldmia %0!, {r2, r3, ip, lr} @ 4\n\
1: mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
@@ -55,13 +54,13 @@ mc_copy_user_page(void *from, void *to)
mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
ldmia %0!, {r2, r3, ip, lr} @ 4\n\
- subs r4, r4, #1 @ 1\n\
+ subs %2, %2, #1 @ 1\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
ldmneia %0!, {r2, r3, ip, lr} @ 4\n\
- bne 1b @ 1\n\
- ldmfd sp!, {r4, pc} @ 3"
- :
- : "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
+ bne 1b @ 1"
+ : "+&r" (from), "+&r" (to), "=&r" (tmp)
+ : "2" (PAGE_SIZE / 64)
+ : "r2", "r3", "ip", "lr");
}

void v4_mc_copy_user_highpage(struct page *to, struct page *from,