@@ -12,8 +12,6 @@
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>
@@ -71,9 +69,6 @@ extern u64 __ua_limit;
#define USER_DS ((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
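These segment helpers exist to support the usual addr_limit override idiom; a minimal sketch of a hypothetical caller (illustration only, not part of this patch):

        mm_segment_t old_fs = get_fs();         /* save the current limit */

        set_fs(KERNEL_DS);                      /* address checks now accept kernel pointers */
        /* ... invoke uaccess-based helpers on a kernel buffer ... */
        set_fs(old_fs);                         /* always restore the saved limit */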
@@ -93,7 +88,7 @@ static inline bool eva_kernel_access(void)
        if (!IS_ENABLED(CONFIG_EVA))
                return false;

        return segment_eq(get_fs(), get_ds());
        return uaccess_kernel();
}

/*
@@ -133,23 +128,14 @@ static inline bool eva_kernel_access(void)
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask) \
({ \
        unsigned long __addr = (unsigned long) (addr); \
        unsigned long __size = size; \
        unsigned long __mask = mask; \
        unsigned long __ok; \
        \
        __chk_user_ptr(addr); \
        __ok = (signed long)(__mask & (__addr | (__addr + __size) | \
                __ua_size(__size))); \
        __ok == 0; \
})
static inline int __access_ok(const void __user *p, unsigned long size)
{
        unsigned long addr = (unsigned long)p;
        return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
}

#define access_ok(type, addr, size) \
        likely(__access_ok((addr), (size), __access_mask))
        likely(__access_ok((addr), (size)))
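To make the range check concrete, a rough worked example (assuming the 32-bit layout where user addresses lie below 0x80000000 and KERNEL_DS carries a zero seg mask; illustration only, not part of the patch):

        /* under get_fs() == USER_DS on a 32-bit kernel: */
        int ok1 = __access_ok((const void __user *)0x00400000UL, 0x1000); /* 1: whole range below 2 GB */
        int ok2 = __access_ok((const void __user *)0x7ffff000UL, 0x2000); /* 0: addr + size sets bit 31 */
        /* under KERNEL_DS the seg mask is 0, so every address passes */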
/*
 * put_user: - Write a simple value into user space.
@@ -811,8 +797,30 @@ extern void __put_user_unaligned_unknown(void);

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#ifndef CONFIG_EVA
#define __invoke_copy_to_user(to, from, n) \
#define __invoke_copy_from(func, to, from, n) \
({ \
        register void *__cu_to_r __asm__("$4"); \
        register const void __user *__cu_from_r __asm__("$5"); \
        register long __cu_len_r __asm__("$6"); \
        \
        __cu_to_r = (to); \
        __cu_from_r = (from); \
        __cu_len_r = (n); \
        __asm__ __volatile__( \
        ".set\tnoreorder\n\t" \
        __MODULE_JAL(func) \
        ".set\tnoat\n\t" \
        __UA_ADDU "\t$1, %1, %2\n\t" \
        ".set\tat\n\t" \
        ".set\treorder" \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
        : \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
          DADDI_SCRATCH, "memory"); \
        __cu_len_r; \
})

#define __invoke_copy_to(func, to, from, n) \
({ \
        register void __user *__cu_to_r __asm__("$4"); \
        register const void *__cu_from_r __asm__("$5"); \
@@ -822,161 +830,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
        __cu_from_r = (from); \
        __cu_len_r = (n); \
        __asm__ __volatile__( \
        __MODULE_JAL(__copy_user) \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
        : \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
          DADDI_SCRATCH, "memory"); \
        __cu_len_r; \
})

#define __invoke_copy_to_kernel(to, from, n) \
        __invoke_copy_to_user(to, from, n)

#endif

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n) \
({ \
        void __user *__cu_to; \
        const void *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        \
        check_object_size(__cu_from, __cu_len, true); \
        might_fault(); \
        \
        if (eva_kernel_access()) \
                __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
                                                   __cu_len); \
        else \
                __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
                                                 __cu_len); \
        __cu_len; \
})
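As a usage sketch of the contract documented for __copy_to_user() above (hypothetical character-device read path; foo_read and dev_buf are made-up names, not part of the patch):

static char dev_buf[128];                       /* made-up device buffer */

static ssize_t foo_read(struct file *filp, char __user *buf,
                        size_t count, loff_t *ppos)
{
        count = min(count, sizeof(dev_buf));
        if (!access_ok(VERIFY_WRITE, buf, count))       /* caller-side check required here */
                return -EFAULT;
        if (__copy_to_user(buf, dev_buf, count))
                return -EFAULT;                         /* nonzero return: bytes left uncopied */
        return count;
}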
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n) \
({ \
        void __user *__cu_to; \
        const void *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        \
        check_object_size(__cu_from, __cu_len, true); \
        \
        if (eva_kernel_access()) \
                __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
                                                   __cu_len); \
        else \
                __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
                                                 __cu_len); \
        __cu_len; \
})

#define __copy_from_user_inatomic(to, from, n) \
({ \
        void *__cu_to; \
        const void __user *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        \
        check_object_size(__cu_to, __cu_len, false); \
        \
        if (eva_kernel_access()) \
                __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
                                                              __cu_from, \
                                                              __cu_len); \
        else \
                __cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
                                                            __cu_from, \
                                                            __cu_len); \
        __cu_len; \
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n) \
({ \
        void __user *__cu_to; \
        const void *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        \
        check_object_size(__cu_from, __cu_len, true); \
        \
        if (eva_kernel_access()) { \
                __cu_len = __invoke_copy_to_kernel(__cu_to, \
                                                   __cu_from, \
                                                   __cu_len); \
        } else { \
                if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
                        might_fault(); \
                        __cu_len = __invoke_copy_to_user(__cu_to, \
                                                         __cu_from, \
                                                         __cu_len); \
                } \
        } \
        __cu_len; \
})
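A hedged sketch of how callers typically consume the copy_to_user() contract above (hypothetical ioctl helper; struct foo_info, foo_fill_info() and uarg are made-up names):

static long foo_get_info(struct foo_info __user *uarg)
{
        struct foo_info info = { 0 };

        foo_fill_info(&info);                   /* made-up helper filling the reply */
        if (copy_to_user(uarg, &info, sizeof(info)))
                return -EFAULT;                 /* any partial copy fails the whole call */
        return 0;
}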
#ifndef CONFIG_EVA

#define __invoke_copy_from_user(to, from, n) \
({ \
        register void *__cu_to_r __asm__("$4"); \
        register const void __user *__cu_from_r __asm__("$5"); \
        register long __cu_len_r __asm__("$6"); \
        \
        __cu_to_r = (to); \
        __cu_from_r = (from); \
        __cu_len_r = (n); \
        __asm__ __volatile__( \
        ".set\tnoreorder\n\t" \
        __MODULE_JAL(__copy_user) \
        ".set\tnoat\n\t" \
        __UA_ADDU "\t$1, %1, %2\n\t" \
        ".set\tat\n\t" \
        ".set\treorder" \
        __MODULE_JAL(func) \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
        : \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
@@ -985,261 +839,78 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
})

#define __invoke_copy_from_kernel(to, from, n) \
        __invoke_copy_from_user(to, from, n)
        __invoke_copy_from(__copy_user, to, from, n)

/* For userland <-> userland operations */
#define ___invoke_copy_in_user(to, from, n) \
        __invoke_copy_from_user(to, from, n)
#define __invoke_copy_to_kernel(to, from, n) \
        __invoke_copy_to(__copy_user, to, from, n)

/* For kernel <-> kernel operations */
#define ___invoke_copy_in_kernel(to, from, n) \
        __invoke_copy_from_user(to, from, n)
        __invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_from_user_inatomic(to, from, n) \
({ \
        register void *__cu_to_r __asm__("$4"); \
        register const void __user *__cu_from_r __asm__("$5"); \
        register long __cu_len_r __asm__("$6"); \
        \
        __cu_to_r = (to); \
        __cu_from_r = (from); \
        __cu_len_r = (n); \
        __asm__ __volatile__( \
        ".set\tnoreorder\n\t" \
        __MODULE_JAL(__copy_user_inatomic) \
        ".set\tnoat\n\t" \
        __UA_ADDU "\t$1, %1, %2\n\t" \
        ".set\tat\n\t" \
        ".set\treorder" \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
        : \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
          DADDI_SCRATCH, "memory"); \
        __cu_len_r; \
})
#ifndef CONFIG_EVA
#define __invoke_copy_from_user(to, from, n) \
        __invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_from_kernel_inatomic(to, from, n) \
        __invoke_copy_from_user_inatomic(to, from, n) \
#define __invoke_copy_to_user(to, from, n) \
        __invoke_copy_to(__copy_user, to, from, n)

#define ___invoke_copy_in_user(to, from, n) \
        __invoke_copy_from(__copy_user, to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
                                       size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from,
                                   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
                                 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \
({ \
        register void *__cu_to_r __asm__("$4"); \
        register const void __user *__cu_from_r __asm__("$5"); \
        register long __cu_len_r __asm__("$6"); \
        \
        __cu_to_r = (to); \
        __cu_from_r = (from); \
        __cu_len_r = (n); \
        __asm__ __volatile__( \
        ".set\tnoreorder\n\t" \
        __MODULE_JAL(func_ptr) \
        ".set\tnoat\n\t" \
        __UA_ADDU "\t$1, %1, %2\n\t" \
        ".set\tat\n\t" \
        ".set\treorder" \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
        : \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
          DADDI_SCRATCH, "memory"); \
        __cu_len_r; \
})

#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \
({ \
        register void *__cu_to_r __asm__("$4"); \
        register const void __user *__cu_from_r __asm__("$5"); \
        register long __cu_len_r __asm__("$6"); \
        \
        __cu_to_r = (to); \
        __cu_from_r = (from); \
        __cu_len_r = (n); \
        __asm__ __volatile__( \
        __MODULE_JAL(func_ptr) \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
        : \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
          DADDI_SCRATCH, "memory"); \
        __cu_len_r; \
})

/*
 * Source or destination address is in userland. We need to go through
 * the TLB
 */
#define __invoke_copy_from_user(to, from, n) \
        __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)

#define __invoke_copy_from_user_inatomic(to, from, n) \
        __invoke_copy_from_user_eva_generic(to, from, n, \
                                            __copy_user_inatomic_eva)
        __invoke_copy_from(__copy_from_user_eva, to, from, n)

#define __invoke_copy_to_user(to, from, n) \
        __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
        __invoke_copy_to(__copy_to_user_eva, to, from, n)

#define ___invoke_copy_in_user(to, from, n) \
        __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)

/*
 * Source or destination address in the kernel. We are not going through
 * the TLB
 */
#define __invoke_copy_from_kernel(to, from, n) \
        __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#define __invoke_copy_from_kernel_inatomic(to, from, n) \
        __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)

#define __invoke_copy_to_kernel(to, from, n) \
        __invoke_copy_to_user_eva_generic(to, from, n, __copy_user)

#define ___invoke_copy_in_kernel(to, from, n) \
        __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
        __invoke_copy_from(__copy_in_user_eva, to, from, n)

#endif /* CONFIG_EVA */

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n) \
({ \
        void *__cu_to; \
        const void __user *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        \
        check_object_size(__cu_to, __cu_len, false); \
        \
        if (eva_kernel_access()) { \
                __cu_len = __invoke_copy_from_kernel(__cu_to, \
                                                     __cu_from, \
                                                     __cu_len); \
        } else { \
                might_fault(); \
                __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
                                                   __cu_len); \
        } \
        __cu_len; \
})
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (eva_kernel_access())
                return __invoke_copy_to_kernel(to, from, n);
        else
                return __invoke_copy_to_user(to, from, n);
}
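Once raw_copy_to_user() exists (together with the INLINE_COPY_TO_USER define further down), the architecture no longer has to provide copy_to_user() itself; the generic linux/uaccess.h builds it on top, schematically like this (simplified sketch from the generic code of that era, not part of the patch):

static inline unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (likely(access_ok(VERIFY_WRITE, to, n)))
                n = raw_copy_to_user(to, from, n);      /* returns bytes left uncopied */
        return n;
}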
/*
 * copy_from_user: - Copy a block of data from user space.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n) \
({ \
        void *__cu_to; \
        const void __user *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        \
        check_object_size(__cu_to, __cu_len, false); \
        \
        if (eva_kernel_access()) { \
                __cu_len = __invoke_copy_from_kernel(__cu_to, \
                                                     __cu_from, \
                                                     __cu_len); \
        } else { \
                if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
                        might_fault(); \
                        __cu_len = __invoke_copy_from_user(__cu_to, \
                                                           __cu_from, \
                                                           __cu_len); \
                } else { \
                        memset(__cu_to, 0, __cu_len); \
                } \
        } \
        __cu_len; \
})
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (eva_kernel_access())
                return __invoke_copy_from_kernel(to, from, n);
        else
                return __invoke_copy_from_user(to, from, n);
}
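A sketch of the caller-side contract for copy_from_user() (hypothetical ioctl path; struct foo_args, foo_apply() and uarg are made-up names):

static long foo_set_args(struct foo_args __user *uarg)
{
        struct foo_args args;

        if (copy_from_user(&args, uarg, sizeof(args)))
                return -EFAULT;         /* on a partial copy the tail of 'args' is zero-filled */
        return foo_apply(&args);        /* made-up helper acting on the validated copy */
}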
#define __copy_in_user(to, from, n) \
({ \
        void __user *__cu_to; \
        const void __user *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        if (eva_kernel_access()) { \
                __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
                                                    __cu_len); \
        } else { \
                might_fault(); \
                __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
                                                  __cu_len); \
        } \
        __cu_len; \
})
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

#define copy_in_user(to, from, n) \
({ \
        void __user *__cu_to; \
        const void __user *__cu_from; \
        long __cu_len; \
        \
        __cu_to = (to); \
        __cu_from = (from); \
        __cu_len = (n); \
        if (eva_kernel_access()) { \
                __cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from, \
                                                    __cu_len); \
        } else { \
                if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
                           access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
                        might_fault(); \
                        __cu_len = ___invoke_copy_in_user(__cu_to, \
                                                          __cu_from, \
                                                          __cu_len); \
                } \
        } \
        __cu_len; \
})
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        if (eva_kernel_access())
                return ___invoke_copy_in_kernel(to, from, n);
        else
                return ___invoke_copy_in_user(to, from, n);
}
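copy_in_user()/raw_copy_in_user() cover user-to-user copies, which mostly show up in compat code; a minimal hypothetical use (new_ubuf, old_ubuf and len are made-up names):

        /* forward a user-supplied record to another user buffer */
        if (copy_in_user(new_ubuf, old_ubuf, len))
                return -EFAULT;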
extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);