author		Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 00:41:04 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 00:41:04 +0300
commit		5db6db0d400edd8bec274e34960cfa22838e1df5 (patch)
tree		3d7934f2eb27a2b72b87eae3c2918cf2e635d814 /arch/ia64/lib
parent		5fab10041b4389b61de7e7a49893190bae686241 (diff)
parent		2fefc97b2180518bac923fba3f79fdca1f41dc15 (diff)
download	linux-5db6db0d400edd8bec274e34960cfa22838e1df5.tar.xz
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
 "This is the uaccess unification pile. It's _not_ the end of uaccess
  work, but the next batch of that will go into the next cycle. This
  one mostly takes copy_from_user() and friends out of arch/* and gets
  the zero-padding behaviour in sync for all architectures.

  Dealing with the nocache/writethrough mess is for the next cycle;
  fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
  sold on access_ok() in there, BTW; just not in this pile), same for
  reducing __copy_... callsites, strn*... stuff, etc. - there will be
  a pile about as large as this one in the next merge window.

  This one sat in -next for weeks. -3KLoC"

* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
  HAVE_ARCH_HARDENED_USERCOPY is unconditional now
  CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
  m32r: switch to RAW_COPY_USER
  hexagon: switch to RAW_COPY_USER
  microblaze: switch to RAW_COPY_USER
  get rid of padding, switch to RAW_COPY_USER
  ia64: get rid of copy_in_user()
  ia64: sanitize __access_ok()
  ia64: get rid of 'segment' argument of __do_{get,put}_user()
  ia64: get rid of 'segment' argument of __{get,put}_user_check()
  ia64: add extable.h
  powerpc: get rid of zeroing, switch to RAW_COPY_USER
  esas2r: don't open-code memdup_user()
  alpha: fix stack smashing in old_adjtimex(2)
  don't open-code kernel_setsockopt()
  mips: switch to RAW_COPY_USER
  mips: get rid of tail-zeroing in primitives
  mips: make copy_from_user() zero tail explicitly
  mips: clean and reorder the forest of macros...
  mips: consolidate __invoke_... wrappers
  ...
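The shape of the change is easiest to see in C. Below is a minimal sketch of
the generic wrapper this series converges on in include/linux/uaccess.h: each
architecture now supplies only raw_copy_from_user(), which returns the number
of bytes it failed to copy, and the tail-zeroing that arch code (like the ia64
assembly below) used to do by hand happens once, in generic code. Kernel
context is assumed; this illustrates the pattern rather than quoting the
merged code verbatim.

    /* sketch of the post-merge generic helper (kernel context assumed) */
    static inline unsigned long
    _copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            unsigned long res = n;
            if (likely(access_ok(VERIFY_READ, from, n)))
                    /* arch hook: returns number of bytes NOT copied */
                    res = raw_copy_from_user(to, from, n);
            if (unlikely(res))
                    /* zero the uncopied tail once, here, for every arch */
                    memset(to + (n - res), 0, res);
            return res;
    }

With the zeroing centralized like this, per-arch fixup code such as the memset
call removed in the ia64 diff below becomes dead weight.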
Diffstat (limited to 'arch/ia64/lib')
-rw-r--r--	arch/ia64/lib/memcpy_mck.S | 13
1 file changed, 1 insertion(+), 12 deletions(-)
diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S
index b264b6a7967b..bbbadc478f5b 100644
--- a/arch/ia64/lib/memcpy_mck.S
+++ b/arch/ia64/lib/memcpy_mck.S
@@ -556,9 +556,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
#define D r22
#define F r28
-#define memset_arg0 r32
-#define memset_arg2 r33
-
#define saved_retval loc0
#define saved_rtlink loc1
#define saved_pfs_stack loc2
@@ -622,7 +619,7 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
* A = (faulting_addr - orig_dst) -> len to faulting st address
* B = (cur_dst - orig_dst) -> len copied so far
* C = A - B -> len need to be copied
- * D = orig_len - A -> len need to be zeroed
+ * D = orig_len - A -> len need to be left alone
*/
(p6) sub A = F, saved_in0
(p7) sub A = F, saved_in1
@@ -638,9 +635,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
sub D = saved_in2, A
;;
cmp.gt p8,p0=C,r0 // more than 1 byte?
- add memset_arg0=saved_in0, A
-(p6) mov memset_arg2=0 // copy_to_user should not call memset
-(p7) mov memset_arg2=D // copy_from_user need to have kbuf zeroed
mov r8=0
mov saved_retval = D
mov saved_rtlink = b0
@@ -652,11 +646,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
;;
add saved_retval=saved_retval,r8 // above might return non-zero value
- cmp.gt p8,p0=memset_arg2,r0 // more than 1 byte?
- mov out0=memset_arg0 // *s
- mov out1=r0 // c
- mov out2=memset_arg2 // n
-(p8) br.call.sptk.few b0=memset
;;
mov retval=saved_retval
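To see what the deleted lines were doing, here is a hedged C rendering of the
fixup arithmetic described in the hunk comment above (A, B, C, D). All names
here are hypothetical; the real code is ia64 assembly in memcpy_mck.S.

    /* hypothetical C rendering of the exception-fixup arithmetic above */
    static unsigned long fixup_uncopied(unsigned long faulting_addr,
                                        unsigned long orig_dst,
                                        unsigned long cur_dst,
                                        unsigned long orig_len)
    {
            unsigned long A = faulting_addr - orig_dst; /* len to faulting st address */
            unsigned long B = cur_dst - orig_dst;       /* len copied so far */
            unsigned long C = A - B;                    /* len that still needs copying */
            unsigned long D = orig_len - A;             /* len that cannot be copied */

            /*
             * Before this merge, the copy_from_user() path ended with
             * roughly memset((void *)(orig_dst + A), 0, D) to zero the
             * unwritten tail of the kernel buffer. After it, the handler
             * just reports D (bytes not copied) as the return value and
             * the generic wrapper does the zeroing.
             */
            (void)C; /* C drives the byte-copy retry loop in the real code */
            return D;
    }

The returned count feeds saved_retval in the assembly above; the removed
memset_arg0/memset_arg2 registers and the br.call to memset existed only to
stage that now-unneeded call.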