Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
  sparc64: Fix clock event multiplier printf format.
  sparc64: Use clock{source,events}_calc_mult_shift().
  sparc64: Use free_bootmem_late() in mdesc_lmb_free().
  sparc: Add alignment and emulation fault perf events.
  sparc64: Add syscall tracepoint support.
  sparc: Stop trying to be so fancy and use __builtin_{memcpy,memset}()
  sparc: Use __builtin_object_size() to validate the buffer size for copy_from_user()
  sparc64: Add some missing __kprobes annotations to kernel fault paths.
  sparc64: Use kprobes_built_in() to avoid ifdefs in fault_64.c
  sparc: Validate that kprobe address is 4-byte aligned.
  sparc64: Don't specify IRQF_SHARED for LDC interrupts.
  sparc64: Fix stack debugging IRQ stack regression.
  sparc64: Fix overly strict range type matching for PCI devices.
@@ -16,8 +16,6 @@
#ifdef __KERNEL__

extern void __memmove(void *,const void *,__kernel_size_t);
extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
extern __kernel_size_t __memset(void *,int,__kernel_size_t);

#ifndef EXPORT_SYMTAB_STROPS

@@ -32,82 +30,10 @@ extern __kernel_size_t __memset(void *,int,__kernel_size_t);
})

#define __HAVE_ARCH_MEMCPY

static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
{
        extern void __copy_1page(void *, const void *);

        if(n <= 32) {
                __builtin_memcpy(to, from, n);
        } else if (((unsigned int) to & 7) != 0) {
                /* Destination is not aligned on the double-word boundary */
                __memcpy(to, from, n);
        } else {
                switch(n) {
                case PAGE_SIZE:
                        __copy_1page(to, from);
                        break;
                default:
                        __memcpy(to, from, n);
                        break;
                }
        }
        return to;
}

static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
{
        __memcpy(to, from, n);
        return to;
}

#undef memcpy
#define memcpy(t, f, n) \
        (__builtin_constant_p(n) ? \
                __constant_memcpy((t),(f),(n)) : \
                __nonconstant_memcpy((t),(f),(n)))
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)

#define __HAVE_ARCH_MEMSET

static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
{
        extern void bzero_1page(void *);
        extern __kernel_size_t __bzero(void *, __kernel_size_t);

        if(!c) {
                if(count == PAGE_SIZE)
                        bzero_1page(s);
                else
                        __bzero(s, count);
        } else {
                __memset(s, c, count);
        }
        return s;
}

static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
{
        extern __kernel_size_t __bzero(void *, __kernel_size_t);

        if(!c)
                __bzero(s, count);
        else
                __memset(s, c, count);
        return s;
}

static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
{
        __memset(s, c, count);
        return s;
}

#undef memset
#define memset(s, c, count) \
        (__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \
                __constant_c_and_count_memset((s), (c), (count)) : \
                __constant_c_memset((s), (c), (count))) \
                : __nonconstant_memset((s), (c), (count)))
#define memset(s, c, count) __builtin_memset(s, c, count)

#define __HAVE_ARCH_MEMSCAN

@@ -15,8 +15,6 @@

#include <asm/asi.h>

extern void *__memset(void *,int,__kernel_size_t);

#ifndef EXPORT_SYMTAB_STROPS

/* First the mem*() things. */
@@ -24,29 +22,10 @@ extern void *__memset(void *,int,__kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);

#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)

#define __HAVE_ARCH_MEMSET
extern void *__builtin_memset(void *,int,__kernel_size_t);

static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
{
        extern __kernel_size_t __bzero(void *, __kernel_size_t);

        if (!c) {
                __bzero(s, count);
                return s;
        } else
                return __memset(s, c, count);
}

#undef memset
#define memset(s, c, count) \
        ((__builtin_constant_p(count) && (count) <= 32) ? \
                __builtin_memset((s), (c), (count)) : \
                (__builtin_constant_p(c) ? \
                        __constant_memset((s), (c), (count)) : \
                        __memset((s), (c), (count))))
#define memset(s, c, count) __builtin_memset(s, c, count)

#define __HAVE_ARCH_MEMSCAN

@@ -227,6 +227,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
/* flag bit 8 is available */
#define TIF_SECCOMP             9       /* secure computing */
#define TIF_SYSCALL_AUDIT       10      /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT  11      /* syscall tracepoint instrumentation */
/* flag bit 11 is available */
/* NOTE: Thread flags >= 12 should be ones we have no interest
 * in using in assembly, else we can't use the mask as
@@ -246,6 +247,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_32BIT              (1<<TIF_32BIT)
#define _TIF_SECCOMP            (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_AUDIT      (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_ABI_PENDING        (1<<TIF_ABI_PENDING)
#define _TIF_POLLING_NRFLAG     (1<<TIF_POLLING_NRFLAG)
#define _TIF_FREEZE             (1<<TIF_FREEZE)
@@ -260,8 +260,23 @@ static inline unsigned long __copy_to_user(void __user *to, const void *from, un
        return __copy_user(to, (__force void __user *) from, n);
}

extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_error("copy_from_user() buffer size is not provably correct")
#else
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
        int sz = __compiletime_object_size(to);

        if (unlikely(sz != -1 && sz < n)) {
                copy_from_user_overflow();
                return -EFAULT;
        }

        if (n && __access_ok((unsigned long) from, n))
                return __copy_user((__force void __user *) to, from, n);
        else
@@ -6,6 +6,7 @@
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
@@ -204,6 +205,14 @@ __asm__ __volatile__( \

extern int __get_user_bad(void);

extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_error("copy_from_user() buffer size is not provably correct")
#else
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

extern unsigned long __must_check ___copy_from_user(void *to,
                                                    const void __user *from,
                                                    unsigned long size);
@@ -212,10 +221,16 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
        unsigned long ret = ___copy_from_user(to, from, size);
        unsigned long ret = (unsigned long) -EFAULT;
        int sz = __compiletime_object_size(to);

        if (unlikely(ret))
                ret = copy_from_user_fixup(to, from, size);
        if (likely(sz == -1 || sz >= size)) {
                ret = ___copy_from_user(to, from, size);
                if (unlikely(ret))
                        ret = copy_from_user_fixup(to, from, size);
        } else {
                copy_from_user_overflow();
        }

        return ret;
}
#define __copy_from_user copy_from_user
@@ -398,7 +398,7 @@
#define __NR_perf_event_open   327
#define __NR_recvmmsg          328

#define NR_SYSCALLS            329
#define NR_syscalls            329

#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,