tile: get rid of zeroing, switch to RAW_COPY_USER

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Al Viro
2017-03-21 14:27:36 -04:00
parent c0ea73f18c
commit 23504bae7f
5 changed files with 27 additions and 180 deletions
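
Under RAW_COPY_USER, raw_copy_from_user()/raw_copy_to_user() only report how many bytes were left uncopied; zero-filling the destination on a short read is no longer the architecture's job. It happens once, in the generic copy_from_user() wrapper, roughly the following sketch (simplified; the real wrapper also does access_ok() and might_fault()):

unsigned long _copy_from_user(void *to, const void __user *from,
			      unsigned long n)
{
	unsigned long res = raw_copy_from_user(to, from, n);

	/* raw_copy_from_user() does no zeroing; pad the uncopied
	 * tail here so callers never see stale kernel memory. */
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}

That is exactly the logic the per-arch __copy_from_user_zeroing() below used to duplicate.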

arch/tile/lib/exports.c

@@ -38,11 +38,10 @@ EXPORT_SYMBOL(__mcount);
 /* arch/tile/lib/, various memcpy files */
 EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(__copy_to_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_zeroing);
+EXPORT_SYMBOL(raw_copy_to_user);
+EXPORT_SYMBOL(raw_copy_from_user);
 #ifdef __tilegx__
-EXPORT_SYMBOL(__copy_in_user_inatomic);
+EXPORT_SYMBOL(raw_copy_in_user);
 #endif
 /* hypervisor glue */

arch/tile/lib/memcpy_32.S

@@ -24,7 +24,6 @@
 #define IS_MEMCPY 0
 #define IS_COPY_FROM_USER 1
-#define IS_COPY_FROM_USER_ZEROING 2
 #define IS_COPY_TO_USER -1
 
 .section .text.memcpy_common, "ax"
@@ -42,40 +41,31 @@
-/* __copy_from_user_inatomic takes the kernel target address in r0,
+/* raw_copy_from_user takes the kernel target address in r0,
  * the user source in r1, and the bytes to copy in r2.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
  */
-ENTRY(__copy_from_user_inatomic)
-.type __copy_from_user_inatomic, @function
-FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
+ENTRY(raw_copy_from_user)
+.type raw_copy_from_user, @function
+FEEDBACK_ENTER_EXPLICIT(raw_copy_from_user, \
 	.text.memcpy_common, \
-	.Lend_memcpy_common - __copy_from_user_inatomic)
+	.Lend_memcpy_common - raw_copy_from_user)
 { movei r29, IS_COPY_FROM_USER; j memcpy_common }
-.size __copy_from_user_inatomic, . - __copy_from_user_inatomic
-
-/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but
- * any uncopiable bytes are zeroed in the target.
- */
-ENTRY(__copy_from_user_zeroing)
-.type __copy_from_user_zeroing, @function
-FEEDBACK_REENTER(__copy_from_user_inatomic)
-{ movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
-.size __copy_from_user_zeroing, . - __copy_from_user_zeroing
+.size raw_copy_from_user, . - raw_copy_from_user
 
-/* __copy_to_user_inatomic takes the user target address in r0,
+/* raw_copy_to_user takes the user target address in r0,
  * the kernel source in r1, and the bytes to copy in r2.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
  */
-ENTRY(__copy_to_user_inatomic)
-.type __copy_to_user_inatomic, @function
-FEEDBACK_REENTER(__copy_from_user_inatomic)
+ENTRY(raw_copy_to_user)
+.type raw_copy_to_user, @function
+FEEDBACK_REENTER(raw_copy_from_user)
 { movei r29, IS_COPY_TO_USER; j memcpy_common }
-.size __copy_to_user_inatomic, . - __copy_to_user_inatomic
+.size raw_copy_to_user, . - raw_copy_to_user
 
 ENTRY(memcpy)
 .type memcpy, @function
-FEEDBACK_REENTER(__copy_from_user_inatomic)
+FEEDBACK_REENTER(raw_copy_from_user)
 { movei r29, IS_MEMCPY }
 .size memcpy, . - memcpy
 /* Fall through */
@@ -520,12 +510,7 @@ copy_from_user_fixup_loop:
 	{ bnzt r2, copy_from_user_fixup_loop }
 
 .Lcopy_from_user_fixup_zero_remainder:
-	{ bbs r29, 2f }   /* low bit set means IS_COPY_FROM_USER */
-	/* byte-at-a-time loop faulted, so zero the rest. */
-	{ move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ }
-1:	{ sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 }
-	{ bnzt r3, 1b }
-2:	move lr, r27
+	move lr, r27
 	{ move r0, r2; jrp lr }
 
 copy_to_user_fixup_loop:
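
In C terms, the fault-fixup tail deleted above behaved roughly like this (illustrative names only: dst stands for r0, remaining for r2, mode for r29):

	if (mode == IS_COPY_FROM_USER_ZEROING) {
		unsigned long left = remaining;

		/* the removed 1:/2: byte-at-a-time loop */
		while (left--)
			*dst++ = 0;
	}
	return remaining;	/* uncopied byte count; now the only behaviour */

With the _ZEROING variant gone, the fixup path simply restores lr and returns the uncopied count.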

arch/tile/lib/memcpy_user_64.c

@@ -51,7 +51,7 @@
 		__v;	\
 	})
 
-#define USERCOPY_FUNC __copy_to_user_inatomic
+#define USERCOPY_FUNC raw_copy_to_user
 #define ST1(p, v) _ST((p), st1, (v))
 #define ST2(p, v) _ST((p), st2, (v))
 #define ST4(p, v) _ST((p), st4, (v))
@@ -62,7 +62,7 @@
 #define LD8 LD
 #include "memcpy_64.c"
 
-#define USERCOPY_FUNC __copy_from_user_inatomic
+#define USERCOPY_FUNC raw_copy_from_user
 #define ST1 ST
 #define ST2 ST
 #define ST4 ST
@@ -73,7 +73,7 @@
 #define LD8(p) _LD((p), ld)
 #include "memcpy_64.c"
 
-#define USERCOPY_FUNC __copy_in_user_inatomic
+#define USERCOPY_FUNC raw_copy_in_user
 #define ST1(p, v) _ST((p), st1, (v))
 #define ST2(p, v) _ST((p), st2, (v))
 #define ST4(p, v) _ST((p), st4, (v))
@@ -83,12 +83,3 @@
 #define LD4(p) _LD((p), ld4u)
 #define LD8(p) _LD((p), ld)
 #include "memcpy_64.c"
-
-unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
-				       unsigned long n)
-{
-	unsigned long rc = __copy_from_user_inatomic(to, from, n);
-	if (unlikely(rc))
-		memset(to + n - rc, 0, rc);
-	return rc;
-}
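
This file generates all three raw_copy_* variants from one body: it #defines USERCOPY_FUNC plus the ST*/LD* accessors and re-#includes memcpy_64.c once per variant. A minimal, self-contained illustration of that include-as-template technique (hypothetical file and macro names, not the kernel's):

/* ---- body.inc: generic body, deliberately no include guard ---- */
unsigned long FUNC(char *dst, const char *src, unsigned long n)
{
	while (n--)
		ST1(dst++, LD1(src++));	/* accessors chosen by the includer */
	return 0;			/* bytes left uncopied */
}
#undef FUNC
#undef ST1
#undef LD1

/* ---- consumer.c: one function stamped out per re-inclusion ---- */
#define FUNC copy_plain
#define ST1(p, v) (*(p) = (v))
#define LD1(p) (*(p))
#include "body.inc"

#define FUNC copy_volatile_stores
#define ST1(p, v) (*(volatile char *)(p) = (v))
#define LD1(p) (*(p))
#include "body.inc"

Each inclusion of body.inc expands with the current macro bindings, so every variant shares a single maintained implementation.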