Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
 "This is the uaccess unification pile. It's _not_ the end of uaccess
  work, but the next batch of that will go into the next cycle. This
  one mostly takes copy_from_user() and friends out of arch/* and gets
  the zero-padding behaviour in sync for all architectures.

  Dealing with the nocache/writethrough mess is for the next cycle;
  fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
  sold on access_ok() in there, BTW; just not in this pile), same for
  reducing __copy_... callsites, strn*... stuff, etc. - there will be
  a pile about as large as this one in the next merge window.

  This one sat in -next for weeks. -3KLoC"

* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
  HAVE_ARCH_HARDENED_USERCOPY is unconditional now
  CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
  m32r: switch to RAW_COPY_USER
  hexagon: switch to RAW_COPY_USER
  microblaze: switch to RAW_COPY_USER
  get rid of padding, switch to RAW_COPY_USER
  ia64: get rid of copy_in_user()
  ia64: sanitize __access_ok()
  ia64: get rid of 'segment' argument of __do_{get,put}_user()
  ia64: get rid of 'segment' argument of __{get,put}_user_check()
  ia64: add extable.h
  powerpc: get rid of zeroing, switch to RAW_COPY_USER
  esas2r: don't open-code memdup_user()
  alpha: fix stack smashing in old_adjtimex(2)
  don't open-code kernel_setsockopt()
  mips: switch to RAW_COPY_USER
  mips: get rid of tail-zeroing in primitives
  mips: make copy_from_user() zero tail explicitly
  mips: clean and reorder the forest of macros...
  mips: consolidate __invoke_... wrappers
  ...
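What ties the whole pile together: each architecture now supplies only raw_copy_to_user()/raw_copy_from_user(), while the access_ok() check and the zero-padding of any uncopied tail live in generic code. Below is a minimal sketch of that generic wrapper, loosely following what include/linux/uaccess.h does after this merge; the helper name is ours, and the hardened-usercopy checks and the INLINE_COPY_FROM_USER knob are left out.

    #include <linux/uaccess.h>
    #include <linux/string.h>

    /* Sketch only: roughly what the generic copy_from_user() path does on
     * top of the arch-supplied raw_copy_from_user(), which returns the
     * number of bytes it could not copy. */
    static inline unsigned long
    sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            unsigned long res = n;

            might_fault();
            if (likely(access_ok(VERIFY_READ, from, n)))
                    res = raw_copy_from_user(to, from, n);
            if (unlikely(res))
                    memset(to + (n - res), 0, res); /* zero-pad the uncopied tail */
            return res;
    }

This is why the per-architecture zeroing variants and their fixup loops disappear in the tile hunks below.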
@@ -7,6 +7,7 @@ generic-y += clkdev.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += hw_irq.h

@@ -18,15 +18,11 @@
/*
* User space memory access functions
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
@@ -102,24 +98,7 @@ int __range_ok(unsigned long addr, unsigned long size);
likely(__range_ok((unsigned long)(addr), (size)) == 0); \
})

/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/

struct exception_table_entry {
unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
#include <asm/extable.h>

/*
* This is a type: either unsigned long, if the argument fits into
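The exception-table comment kept above (the declarations themselves now come via asm/extable.h) describes a table of (faulting instruction, fixup) address pairs. Here is a self-contained sketch of how such a table might be consulted on a fault, assuming absolute addresses and a table sorted by instruction address; the kernel's real lookup lives in lib/extable.c and often stores relative offsets instead.

    #include <stddef.h>

    /* One entry per instruction that is allowed to fault, as described in
     * the comment above: where the fault may happen, and where to resume. */
    struct extable_entry {
            unsigned long insn;
            unsigned long fixup;
    };

    /* Binary search a table sorted by ->insn; returns the fixup address
     * for the faulting program counter, or 0 if the fault was not expected. */
    static unsigned long search_fixup(const struct extable_entry *tbl,
                                      size_t nr, unsigned long fault_pc)
    {
            size_t lo = 0, hi = nr;

            while (lo < hi) {
                    size_t mid = lo + (hi - lo) / 2;

                    if (tbl[mid].insn == fault_pc)
                            return tbl[mid].fixup;
                    if (tbl[mid].insn < fault_pc)
                            lo = mid + 1;
                    else
                            hi = mid;
            }
            return 0;
    }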
@@ -334,145 +313,16 @@ extern int __put_user_bad(void)
((x) = 0, -EFAULT); \
})

/**
* __copy_to_user() - copy data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* An alternate version - __copy_to_user_inatomic() - is designed
* to be called from atomic context, typically bracketed by calls
* to pagefault_disable() and pagefault_enable().
*/
extern unsigned long __must_check __copy_to_user_inatomic(
void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
n = __copy_to_user(to, from, n);
return n;
}

/**
* __copy_from_user() - copy data from user space, with less checking.
* @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*
* An alternate version - __copy_from_user_inatomic() - is designed
* to be called from atomic context, typically bracketed by calls
* to pagefault_disable() and pagefault_enable(). This version
* does *NOT* pad with zeros.
*/
extern unsigned long __must_check __copy_from_user_inatomic(
void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
return __copy_from_user_zeroing(to, from, n);
}

static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
n = __copy_from_user(to, from, n);
else
memset(to, 0, n);
return n;
}

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static inline unsigned long __must_check copy_from_user(void *to,
const void __user *from,
unsigned long n)
{
int sz = __compiletime_object_size(to);

if (likely(sz == -1 || sz >= n))
n = _copy_from_user(to, from, n);
else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
__bad_copy_user();

return n;
}
extern unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

#ifdef __tilegx__
/**
* __copy_in_user() - copy data within user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to user space. Caller must check
* the specified blocks with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
extern unsigned long __copy_in_user_inatomic(
extern unsigned long raw_copy_in_user(
void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
might_fault();
return __copy_in_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
n = __copy_in_user(to, from, n);
return n;
}
#endif

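The kernel-doc removed above still describes the contract callers rely on: copy_to_user() and copy_from_user() return the number of bytes that could not be copied (zero on success), and the double-underscore variants expect the caller to have done the access_ok() check. A hedged usage sketch follows; the helper name and bounds are made up for illustration.

    #include <linux/uaccess.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Hypothetical helper: pull a small, caller-bounded blob in from user
     * space.  A non-zero return from copy_from_user() means some bytes
     * could not be copied, so the whole operation is treated as a fault. */
    static int example_read_config(void *kbuf, size_t kbuf_len,
                                   const void __user *ubuf, size_t len)
    {
            if (len > kbuf_len)
                    return -EINVAL;
            if (copy_from_user(kbuf, ubuf, len))
                    return -EFAULT;
            return 0;
    }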
@@ -38,11 +38,10 @@ EXPORT_SYMBOL(__mcount);

/* arch/tile/lib/, various memcpy files */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__copy_to_user_inatomic);
EXPORT_SYMBOL(__copy_from_user_inatomic);
EXPORT_SYMBOL(__copy_from_user_zeroing);
EXPORT_SYMBOL(raw_copy_to_user);
EXPORT_SYMBOL(raw_copy_from_user);
#ifdef __tilegx__
EXPORT_SYMBOL(__copy_in_user_inatomic);
EXPORT_SYMBOL(raw_copy_in_user);
#endif

/* hypervisor glue */

@@ -24,7 +24,6 @@

#define IS_MEMCPY 0
#define IS_COPY_FROM_USER 1
#define IS_COPY_FROM_USER_ZEROING 2
#define IS_COPY_TO_USER -1

.section .text.memcpy_common, "ax"
@@ -42,40 +41,31 @@
9


/* __copy_from_user_inatomic takes the kernel target address in r0,
/* raw_copy_from_user takes the kernel target address in r0,
* the user source in r1, and the bytes to copy in r2.
* It returns the number of uncopiable bytes (hopefully zero) in r0.
*/
ENTRY(__copy_from_user_inatomic)
.type __copy_from_user_inatomic, @function
FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
ENTRY(raw_copy_from_user)
.type raw_copy_from_user, @function
FEEDBACK_ENTER_EXPLICIT(raw_copy_from_user, \
.text.memcpy_common, \
.Lend_memcpy_common - __copy_from_user_inatomic)
.Lend_memcpy_common - raw_copy_from_user)
{ movei r29, IS_COPY_FROM_USER; j memcpy_common }
.size __copy_from_user_inatomic, . - __copy_from_user_inatomic
.size raw_copy_from_user, . - raw_copy_from_user

/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but
* any uncopiable bytes are zeroed in the target.
*/
ENTRY(__copy_from_user_zeroing)
.type __copy_from_user_zeroing, @function
FEEDBACK_REENTER(__copy_from_user_inatomic)
{ movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
.size __copy_from_user_zeroing, . - __copy_from_user_zeroing

/* __copy_to_user_inatomic takes the user target address in r0,
/* raw_copy_to_user takes the user target address in r0,
* the kernel source in r1, and the bytes to copy in r2.
* It returns the number of uncopiable bytes (hopefully zero) in r0.
*/
ENTRY(__copy_to_user_inatomic)
.type __copy_to_user_inatomic, @function
FEEDBACK_REENTER(__copy_from_user_inatomic)
ENTRY(raw_copy_to_user)
.type raw_copy_to_user, @function
FEEDBACK_REENTER(raw_copy_from_user)
{ movei r29, IS_COPY_TO_USER; j memcpy_common }
.size __copy_to_user_inatomic, . - __copy_to_user_inatomic
.size raw_copy_to_user, . - raw_copy_to_user

ENTRY(memcpy)
.type memcpy, @function
FEEDBACK_REENTER(__copy_from_user_inatomic)
FEEDBACK_REENTER(raw_copy_from_user)
{ movei r29, IS_MEMCPY }
.size memcpy, . - memcpy
/* Fall through */
@@ -520,12 +510,7 @@ copy_from_user_fixup_loop:
{ bnzt r2, copy_from_user_fixup_loop }

.Lcopy_from_user_fixup_zero_remainder:
{ bbs r29, 2f } /* low bit set means IS_COPY_FROM_USER */
/* byte-at-a-time loop faulted, so zero the rest. */
{ move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ }
1: { sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 }
{ bnzt r3, 1b }
2: move lr, r27
move lr, r27
{ move r0, r2; jrp lr }

copy_to_user_fixup_loop:

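Two things happen in the memcpy_32.S hunks above: the entry points take the raw_copy_* names, and the IS_COPY_FROM_USER_ZEROING case, together with the byte-at-a-time zeroing loop in the fault fixup, goes away because padding is now done generically (as sketched near the top of this page). The shared-body-plus-selector structure is unchanged; below is a plain C sketch of that pattern under our own names, leaving out the fault handling that makes the user cases special.

    #include <stddef.h>
    #include <string.h>

    enum copy_mode { IS_MEMCPY, IS_COPY_FROM_USER, IS_COPY_TO_USER };

    /* One common body; the thin entry points below just select a mode,
     * the way the tile stubs load a selector into r29 and jump to
     * memcpy_common.  Returns the number of bytes not copied. */
    static size_t copy_common(void *dst, const void *src, size_t n,
                              enum copy_mode mode)
    {
            (void)mode;     /* a real version would vary loads/stores and fixups */
            memcpy(dst, src, n);
            return 0;
    }

    size_t sketch_memcpy(void *d, const void *s, size_t n)
    {
            return copy_common(d, s, n, IS_MEMCPY);
    }

    size_t sketch_copy_from(void *d, const void *s, size_t n)
    {
            return copy_common(d, s, n, IS_COPY_FROM_USER);
    }

    size_t sketch_copy_to(void *d, const void *s, size_t n)
    {
            return copy_common(d, s, n, IS_COPY_TO_USER);
    }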
@@ -51,7 +51,7 @@
__v; \
})

#define USERCOPY_FUNC __copy_to_user_inatomic
#define USERCOPY_FUNC raw_copy_to_user
#define ST1(p, v) _ST((p), st1, (v))
#define ST2(p, v) _ST((p), st2, (v))
#define ST4(p, v) _ST((p), st4, (v))
@@ -62,7 +62,7 @@
#define LD8 LD
#include "memcpy_64.c"

#define USERCOPY_FUNC __copy_from_user_inatomic
#define USERCOPY_FUNC raw_copy_from_user
#define ST1 ST
#define ST2 ST
#define ST4 ST
@@ -73,7 +73,7 @@
#define LD8(p) _LD((p), ld)
#include "memcpy_64.c"

#define USERCOPY_FUNC __copy_in_user_inatomic
#define USERCOPY_FUNC raw_copy_in_user
#define ST1(p, v) _ST((p), st1, (v))
#define ST2(p, v) _ST((p), st2, (v))
#define ST4(p, v) _ST((p), st4, (v))
@@ -83,12 +83,3 @@
#define LD4(p) _LD((p), ld4u)
#define LD8(p) _LD((p), ld)
#include "memcpy_64.c"

unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
unsigned long n)
{
unsigned long rc = __copy_from_user_inatomic(to, from, n);
if (unlikely(rc))
memset(to + n - rc, 0, rc);
return rc;
}
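The memcpy_user_64.c hunks above generate their user-copy routines by re-including memcpy_64.c with different USERCOPY_FUNC and load/store macros, so only the generated names change in this diff, and the separate __copy_from_user_zeroing() wrapper at the end is dropped for the same reason as the 32-bit zeroing loop. Below is a generic, self-contained sketch of that include-as-template trick; the file and macro names here are invented.

    /* copy_template.h: one body, stamped out once per variant.  The
     * includer must define COPY_FUNC, LOADB and STOREB before including. */
    static unsigned long COPY_FUNC(void *to, const void *from, unsigned long n)
    {
            unsigned char *d = to;
            const unsigned char *s = from;

            while (n--)
                    STOREB(d++, LOADB(s++));
            return 0;       /* bytes not copied */
    }
    #undef COPY_FUNC
    #undef LOADB
    #undef STOREB

    /* variants.c: two functions from the same template, mirroring the
     * repeated '#include "memcpy_64.c"' above. */
    #define COPY_FUNC plain_copy
    #define LOADB(p)        (*(p))
    #define STOREB(p, v)    (*(p) = (v))
    #include "copy_template.h"

    #define COPY_FUNC marked_copy   /* second variant; a real one would differ */
    #define LOADB(p)        (*(p))
    #define STOREB(p, v)    (*(p) = (v))
    #include "copy_template.h"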