Merge branches 'uaccess.alpha', 'uaccess.arc', 'uaccess.arm', 'uaccess.arm64', 'uaccess.avr32', 'uaccess.bfin', 'uaccess.c6x', 'uaccess.cris', 'uaccess.frv', 'uaccess.h8300', 'uaccess.hexagon', 'uaccess.ia64', 'uaccess.m32r', 'uaccess.m68k', 'uaccess.metag', 'uaccess.microblaze', 'uaccess.mips', 'uaccess.mn10300', 'uaccess.nios2', 'uaccess.openrisc', 'uaccess.parisc', 'uaccess.powerpc', 'uaccess.s390', 'uaccess.score', 'uaccess.sh', 'uaccess.sparc', 'uaccess.tile', 'uaccess.um', 'uaccess.unicore32', 'uaccess.x86' and 'uaccess.xtensa' into work.uaccess
Parents:
bf7af0cea8
ec022681a4
e13909a4ac
4de5b63e76
92430dab36
e5c1540030
50e9ab915a
86944ee158
0c7e9a870e
48f666c986
33ab2da801
ac4691fac8
b3622d3217
9a677341cd
29be02eb6f
840db3f938
d491afb865
2260ea86c0
19dbf70c5a
de51d6cc2c
ab89866be3
f64fd180ec
3448890c32
37096003c8
dc14253523
f98f48ee7c
31af2f36d5
23504bae7f
a668ce3a00
2ef59f2856
beba3a20bf
7d4914db8f
Commit:
eea86b637a
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -33,6 +33,7 @@ config TILE
 	select USER_STACKTRACE_SUPPORT
 	select USE_PMC if PERF_EVENTS
 	select VIRT_TO_BUS
+	select ARCH_HAS_RAW_COPY_USER
 
 config MMU
 	def_bool y
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += clkdev.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += fb.h
 generic-y += fcntl.h
 generic-y += hw_irq.h
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -74,6 +74,7 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
 #define MAXMEM	(_VMALLOC_START - PAGE_OFFSET)
 
 /* We have no pmd or pud since we are strictly a two-level page table */
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 static inline int pud_huge_page(pud_t pud)	{ return 0; }
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -59,6 +59,7 @@
 #ifndef __ASSEMBLY__
 
 /* We have no pud since we are a three-level page table. */
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 
 /*
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -18,15 +18,11 @@
 /*
  * User space memory access functions
  */
-#include <linux/sched.h>
 #include <linux/mm.h>
 #include <asm-generic/uaccess-unaligned.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
-
 /*
  * The fs value determines whether argument validity checking should be
  * performed or not.  If get_fs() == USER_DS, checking is performed, with
@@ -102,24 +98,7 @@ int __range_ok(unsigned long addr, unsigned long size);
 	likely(__range_ok((unsigned long)(addr), (size)) == 0);	\
 })
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
 
 /*
  * This is a type: either unsigned long, if the argument fits into
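The comment block deleted above described the fixup mechanism that asm/extable.h now covers: each table entry pairs the address of an instruction allowed to fault with the address to resume at. As a rough illustration of the lookup the fault handler performs (a minimal sketch; find_fixup and its parameter names are illustrative, not the kernel's actual search_exception_tables() implementation):

struct exception_table_entry {
	unsigned long insn, fixup;
};

/* Binary search over a table sorted by faulting-instruction address. */
static const struct exception_table_entry *
find_fixup(const struct exception_table_entry *first,
	   const struct exception_table_entry *last,
	   unsigned long faulting_pc)
{
	while (first <= last) {
		const struct exception_table_entry *mid =
			first + (last - first) / 2;

		if (mid->insn == faulting_pc)
			return mid;	/* handler resumes at mid->fixup */
		if (mid->insn < faulting_pc)
			first = mid + 1;
		else
			last = mid - 1;
	}
	return NULL;	/* no fixup registered: a genuine bad access */
}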
@@ -334,145 +313,16 @@ extern int __put_user_bad(void)
 	((x) = 0, -EFAULT);	\
 })
 
-/**
- * __copy_to_user() - copy data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * An alternate version - __copy_to_user_inatomic() - is designed
- * to be called from atomic context, typically bracketed by calls
- * to pagefault_disable() and pagefault_enable().
- */
-extern unsigned long __must_check __copy_to_user_inatomic(
-	void __user *to, const void *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	might_fault();
-	return __copy_to_user_inatomic(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
-	return n;
-}
-
-/**
- * __copy_from_user() - copy data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- *
- * An alternate version - __copy_from_user_inatomic() - is designed
- * to be called from atomic context, typically bracketed by calls
- * to pagefault_disable() and pagefault_enable().  This version
- * does *NOT* pad with zeros.
- */
-extern unsigned long __must_check __copy_from_user_inatomic(
-	void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_from_user_zeroing(
-	void *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	might_fault();
-	return __copy_from_user_zeroing(to, from, n);
-}
-
-static inline unsigned long __must_check
-_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
-}
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
-	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static inline unsigned long __must_check copy_from_user(void *to,
-					  const void __user *from,
-					  unsigned long n)
-{
-	int sz = __compiletime_object_size(to);
-
-	if (likely(sz == -1 || sz >= n))
-		n = _copy_from_user(to, from, n);
-	else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
+extern unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 #ifdef __tilegx__
-/**
- * __copy_in_user() - copy data within user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to user space.  Caller must check
- * the specified blocks with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-extern unsigned long __copy_in_user_inatomic(
+extern unsigned long raw_copy_in_user(
 	void __user *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	might_fault();
-	return __copy_in_user_inatomic(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
-		n = __copy_in_user(to, from, n);
-	return n;
-}
 #endif
 
 
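Selecting ARCH_HAS_RAW_COPY_USER in the Kconfig hunk above is what lets all of the deleted wrappers go: the architecture now supplies only raw_copy_{to,from}_user(), and the generic include/linux/uaccess.h layers the access_ok() check, the might_fault() annotation, and the zero-padding of the uncopied tail on top. Schematically, the generic checked copy behaves like this (a simplified sketch of the generic wrapper, not the verbatim upstream code):

static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);	/* zero the uncopied tail */
	return res;
}

This is why the per-arch copy_from_user(), the zeroing variant, and the overflow-warning helpers could all be deleted here: every architecture that converts gets one shared, hardened implementation.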
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -38,11 +38,10 @@ EXPORT_SYMBOL(__mcount);
 
 /* arch/tile/lib/, various memcpy files */
 EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(__copy_to_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_zeroing);
+EXPORT_SYMBOL(raw_copy_to_user);
+EXPORT_SYMBOL(raw_copy_from_user);
 #ifdef __tilegx__
-EXPORT_SYMBOL(__copy_in_user_inatomic);
+EXPORT_SYMBOL(raw_copy_in_user);
 #endif
 
 /* hypervisor glue */
--- a/arch/tile/lib/memcpy_32.S
+++ b/arch/tile/lib/memcpy_32.S
@@ -24,7 +24,6 @@
 
 #define IS_MEMCPY	0
 #define IS_COPY_FROM_USER	1
-#define IS_COPY_FROM_USER_ZEROING	2
 #define IS_COPY_TO_USER	-1
 
 	.section .text.memcpy_common, "ax"
@@ -42,40 +41,31 @@
 	9
 
 
-/* __copy_from_user_inatomic takes the kernel target address in r0,
+/* raw_copy_from_user takes the kernel target address in r0,
  * the user source in r1, and the bytes to copy in r2.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
  */
-ENTRY(__copy_from_user_inatomic)
-.type __copy_from_user_inatomic, @function
-	FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
+ENTRY(raw_copy_from_user)
+.type raw_copy_from_user, @function
+	FEEDBACK_ENTER_EXPLICIT(raw_copy_from_user, \
	  .text.memcpy_common, \
-	  .Lend_memcpy_common - __copy_from_user_inatomic)
+	  .Lend_memcpy_common - raw_copy_from_user)
 	{ movei r29, IS_COPY_FROM_USER; j memcpy_common }
-	.size __copy_from_user_inatomic, . - __copy_from_user_inatomic
+	.size raw_copy_from_user, . - raw_copy_from_user
 
-/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but
- * any uncopiable bytes are zeroed in the target.
- */
-ENTRY(__copy_from_user_zeroing)
-.type __copy_from_user_zeroing, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
-	{ movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
-	.size __copy_from_user_zeroing, . - __copy_from_user_zeroing
-
-/* __copy_to_user_inatomic takes the user target address in r0,
+/* raw_copy_to_user takes the user target address in r0,
  * the kernel source in r1, and the bytes to copy in r2.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
  */
-ENTRY(__copy_to_user_inatomic)
-.type __copy_to_user_inatomic, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
+ENTRY(raw_copy_to_user)
+.type raw_copy_to_user, @function
+	FEEDBACK_REENTER(raw_copy_from_user)
 	{ movei r29, IS_COPY_TO_USER; j memcpy_common }
-	.size __copy_to_user_inatomic, . - __copy_to_user_inatomic
+	.size raw_copy_to_user, . - raw_copy_to_user
 
 ENTRY(memcpy)
 .type memcpy, @function
-	FEEDBACK_REENTER(__copy_from_user_inatomic)
+	FEEDBACK_REENTER(raw_copy_from_user)
 	{ movei r29, IS_MEMCPY }
 	.size memcpy, . - memcpy
 	/* Fall through */
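The contract stated in the comments, "returns the number of uncopiable bytes (hopefully zero) in r0", is exactly what C callers test against once the generic wrappers re-expose it as the return value of copy_from_user(). An illustrative caller (hypothetical driver code, not part of this patch; example_dev and example_config are made-up types):

/* Any nonzero remainder means part of the user buffer faulted,
 * so the conventional response is -EFAULT. */
static long example_ioctl_set(struct example_dev *dev,
			      const void __user *argp)
{
	struct example_config cfg;

	if (copy_from_user(&cfg, argp, sizeof(cfg)))
		return -EFAULT;
	dev->cfg = cfg;
	return 0;
}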
@@ -520,12 +510,7 @@ copy_from_user_fixup_loop:
 	{ bnzt r2, copy_from_user_fixup_loop }
 
 .Lcopy_from_user_fixup_zero_remainder:
-	{ bbs r29, 2f }  /* low bit set means IS_COPY_FROM_USER */
-	/* byte-at-a-time loop faulted, so zero the rest. */
-	{ move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ }
-1:	{ sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 }
-	{ bnzt r3, 1b }
-2:	move lr, r27
+	move lr, r27
 	{ move r0, r2; jrp lr }
 
 copy_to_user_fixup_loop:
--- a/arch/tile/lib/memcpy_user_64.c
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -51,7 +51,7 @@
 		__v;	\
 	})
 
-#define USERCOPY_FUNC __copy_to_user_inatomic
+#define USERCOPY_FUNC raw_copy_to_user
 #define ST1(p, v) _ST((p), st1, (v))
 #define ST2(p, v) _ST((p), st2, (v))
 #define ST4(p, v) _ST((p), st4, (v))
@@ -62,7 +62,7 @@
 #define LD8 LD
 #include "memcpy_64.c"
 
-#define USERCOPY_FUNC __copy_from_user_inatomic
+#define USERCOPY_FUNC raw_copy_from_user
 #define ST1 ST
 #define ST2 ST
 #define ST4 ST
@@ -73,7 +73,7 @@
 #define LD8(p) _LD((p), ld)
 #include "memcpy_64.c"
 
-#define USERCOPY_FUNC __copy_in_user_inatomic
+#define USERCOPY_FUNC raw_copy_in_user
 #define ST1(p, v) _ST((p), st1, (v))
 #define ST2(p, v) _ST((p), st2, (v))
 #define ST4(p, v) _ST((p), st4, (v))
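These three hunks show how memcpy_user_64.c stamps out each user-copy variant: it redefines USERCOPY_FUNC and the ST*/LD* accessor macros, then re-includes memcpy_64.c so the same copy loop is compiled once per variant. The same multiple-inclusion idiom in miniature (file and macro names here are illustrative, not the kernel's):

/* op_template.h: expanded once per inclusion */
int FUNC(int a, int b)
{
	return OP(a, b);
}
#undef FUNC
#undef OP

/* consumer.c: stamp out two variants from one template */
#define FUNC add_ints
#define OP(a, b) ((a) + (b))
#include "op_template.h"

#define FUNC mul_ints
#define OP(a, b) ((a) * (b))
#include "op_template.h"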
@@ -83,12 +83,3 @@
 #define LD4(p) _LD((p), ld4u)
 #define LD8(p) _LD((p), ld)
 #include "memcpy_64.c"
-
-unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
-				       unsigned long n)
-{
-	unsigned long rc = __copy_from_user_inatomic(to, from, n);
-	if (unlikely(rc))
-		memset(to + n - rc, 0, rc);
-	return rc;
-}
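Note that the deleted helper's memset arithmetic zeroes exactly the tail that faulted: if rc of the n requested bytes could not be copied, the copy stopped at offset n - rc, so memset(to + n - rc, 0, rc) clears bytes [n - rc, n). For example, with n = 64 and rc = 16, bytes 48 through 63 of the destination are zeroed. That same zeroing now happens once in the generic copy_from_user() path, which is why both this C helper and the assembly zeroing loop above can go.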