Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc changes from David S. Miller:
 "This has the generic strncpy_from_user() implementation architectures
  can now use, which we've been developing on linux-arch over the past
  few days.

  For good measure I ran both a 32-bit and a 64-bit glibc testsuite run,
  and the latter of which pointed out an adjustment I needed to make to
  sparc's user_addr_max() definition.

  Linus, you were right, STACK_TOP was not the right thing to use, even
  on sparc itself :-)

  From Sam Ravnborg, we have a conversion of sparc32 over to the common
  alloc_thread_info_node(), since the aspect which originally blocked
  our doing so (sun4c) has been removed."

Fix up trivial arch/sparc/Kconfig and lib/Makefile conflicts.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc: Fix user_addr_max() definition.
  lib: Sparc's strncpy_from_user is generic enough, move under lib/
  kernel: Move REPEAT_BYTE definition into linux/kernel.h
  sparc: Increase portability of strncpy_from_user() implementation.
  sparc: Optimize strncpy_from_user() zero byte search.
  sparc: Add full proper error handling to strncpy_from_user().
  sparc32: use the common implementation of alloc_thread_info_node()
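As background for the shortlog entries above: the "zero byte search" optimization and the REPEAT_BYTE() definition that moves into linux/kernel.h both revolve around the standard word-at-a-time trick for spotting a NUL terminator, i.e. replicating a byte pattern across an unsigned long and testing a whole word with a couple of mask operations. The following is a minimal, self-contained C sketch of that test, not the exact helper code moved by this merge; the has_zero_byte() name is illustrative only.

    #include <stdio.h>
    #include <string.h>

    /* Replicate a byte value into every byte of an unsigned long;
     * e.g. this gives 0x0101010101010101 for 0x01 on a 64-bit build. */
    #define REPEAT_BYTE(x)  ((~0ul / 0xff) * (x))

    /* Nonzero iff 'word' contains at least one zero byte.  A test like
     * this is what lets strncpy_from_user() scan the source string a
     * word at a time instead of byte by byte. */
    static unsigned long has_zero_byte(unsigned long word)
    {
            return (word - REPEAT_BYTE(0x01)) & ~word & REPEAT_BYTE(0x80);
    }

    int main(void)
    {
            unsigned long w = 0;

            memcpy(&w, "abc\0efg", sizeof(w));      /* embed a NUL byte */
            printf("%d\n", has_zero_byte(w) != 0);  /* prints 1 */
            return 0;
    }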
@@ -34,12 +34,12 @@ config SPARC
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_CMOS_UPDATE
        select GENERIC_CLOCKEVENTS
        select GENERIC_STRNCPY_FROM_USER

config SPARC32
        def_bool !64BIT
        select GENERIC_ATOMIC64
        select CLZ_TAB
        select ARCH_THREAD_INFO_ALLOCATOR
        select ARCH_USES_GETTIMEOFFSET

config SPARC64
@@ -42,7 +42,9 @@
#define TASK_SIZE_OF(tsk) \
        (test_tsk_thread_flag(tsk,TIF_32BIT) ? \
         (1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
#define TASK_SIZE TASK_SIZE_OF(current)
#define TASK_SIZE \
        (test_thread_flag(TIF_32BIT) ? \
         (1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
#ifdef __KERNEL__

#define STACK_TOP32 ((1UL << 32UL) - PAGE_SIZE)
@@ -77,18 +77,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
/*
 * thread information allocation
 */
#define THREAD_INFO_ORDER 1

struct thread_info * alloc_thread_info_node(struct task_struct *tsk, int node);
void free_thread_info(struct thread_info *);
#define THREAD_SIZE_ORDER 1

#endif /* __ASSEMBLY__ */

/*
 * Size of kernel stack for each process.
 * Observe the order of get_free_pages() in alloc_thread_info_node().
 * The sun4 has 8K stack too, because it's short on memory, and 16K is a waste.
 */
/* Size of kernel stack for each process */
#define THREAD_SIZE (2 * PAGE_SIZE)

/*
@@ -5,4 +5,10 @@
#else
#include <asm/uaccess_32.h>
#endif

#define user_addr_max() \
        (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)

extern long strncpy_from_user(char *dest, const char __user *src, long count);

#endif
@@ -304,16 +304,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
        return n;
}

extern long __strncpy_from_user(char *dest, const char __user *src, long count);

static inline long strncpy_from_user(char *dest, const char __user *src, long count)
{
        if (__access_ok((unsigned long) src, count))
                return __strncpy_from_user(dest, src, count);
        else
                return -EFAULT;
}

extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);
@@ -257,10 +257,6 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

extern long __must_check __strncpy_from_user(char *dest, const char __user *src, long count);

#define strncpy_from_user __strncpy_from_user

extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);
@@ -10,7 +10,7 @@ lib-y += strlen.o
lib-y += checksum_$(BITS).o
lib-$(CONFIG_SPARC32) += blockops.o
lib-y += memscan_$(BITS).o memcmp.o strncmp_$(BITS).o
lib-y += strncpy_from_user_$(BITS).o strlen_user_$(BITS).o
lib-y += strlen_user_$(BITS).o
lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
lib-$(CONFIG_SPARC32) += copy_user.o locks.o
lib-$(CONFIG_SPARC64) += atomic_64.o
@@ -33,9 +33,6 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__bzero);

/* Moving data to/from/in userspace. */
EXPORT_SYMBOL(__strncpy_from_user);

/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial);
@@ -1,47 +0,0 @@
/* strncpy_from_user.S: Sparc strncpy from userspace.
 *
 * Copyright(C) 1996 David S. Miller
 */

#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/errno.h>

        .text

/* Must return:
 *
 * -EFAULT for an exception
 * count if we hit the buffer limit
 * bytes copied if we hit a null byte
 */

ENTRY(__strncpy_from_user)
        /* %o0=dest, %o1=src, %o2=count */
        mov %o2, %o3
1:
        subcc %o2, 1, %o2
        bneg 2f
        nop
10:
        ldub [%o1], %o4
        add %o0, 1, %o0
        cmp %o4, 0
        add %o1, 1, %o1
        bne 1b
        stb %o4, [%o0 - 1]
2:
        add %o2, 1, %o0
        retl
        sub %o3, %o0, %o0
ENDPROC(__strncpy_from_user)

        .section .fixup,#alloc,#execinstr
        .align 4
4:
        retl
        mov -EFAULT, %o0

        .section __ex_table,#alloc
        .align 4
        .word 10b, 4b
@@ -1,133 +0,0 @@
/*
 * strncpy_from_user.S: Sparc64 strncpy from userspace.
 *
 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/errno.h>

        .data
        .align 8
0:      .xword 0x0101010101010101

        .text

/* Must return:
 *
 * -EFAULT for an exception
 * count if we hit the buffer limit
 * bytes copied if we hit a null byte
 * (without the null byte)
 *
 * This implementation assumes:
 * %o1 is 8 aligned => !(%o2 & 7)
 * %o0 is 8 aligned (if not, it will be slooooow, but will work)
 *
 * This is optimized for the common case:
 * in my stats, 90% of src are 8 aligned (even on sparc32)
 * and average length is 18 or so.
 */

ENTRY(__strncpy_from_user)
        /* %o0=dest, %o1=src, %o2=count */
        andcc %o1, 7, %g0               ! IEU1 Group
        bne,pn %icc, 30f                ! CTI
        add %o0, %o2, %g3               ! IEU0
60:     ldxa [%o1] %asi, %g1            ! Load Group
        brlez,pn %o2, 10f               ! CTI
        mov %o0, %o3                    ! IEU0
50:     sethi %hi(0b), %o4              ! IEU0 Group
        ldx [%o4 + %lo(0b)], %o4        ! Load
        sllx %o4, 7, %o5                ! IEU1 Group
1:      sub %g1, %o4, %g2               ! IEU0 Group
        stx %g1, [%o0]                  ! Store
        add %o0, 8, %o0                 ! IEU1
        andcc %g2, %o5, %g0             ! IEU1 Group
        bne,pn %xcc, 5f                 ! CTI
        add %o1, 8, %o1                 ! IEU0
        cmp %o0, %g3                    ! IEU1 Group
        bl,a,pt %xcc, 1b                ! CTI
61:     ldxa [%o1] %asi, %g1            ! Load
10:     retl                            ! CTI Group
        mov %o2, %o0                    ! IEU0
5:      srlx %g2, 32, %g7               ! IEU0 Group
        sethi %hi(0xff00), %o4          ! IEU1
        andcc %g7, %o5, %g0             ! IEU1 Group
        be,pn %icc, 2f                  ! CTI
        or %o4, %lo(0xff00), %o4        ! IEU0
        srlx %g1, 48, %g7               ! IEU0 Group
        andcc %g7, %o4, %g0             ! IEU1 Group
        be,pn %icc, 50f                 ! CTI
        andcc %g7, 0xff, %g0            ! IEU1 Group
        be,pn %icc, 51f                 ! CTI
        srlx %g1, 32, %g7               ! IEU0
        andcc %g7, %o4, %g0             ! IEU1 Group
        be,pn %icc, 52f                 ! CTI
        andcc %g7, 0xff, %g0            ! IEU1 Group
        be,pn %icc, 53f                 ! CTI
2:      andcc %g2, %o5, %g0             ! IEU1 Group
        be,pn %icc, 2f                  ! CTI
        srl %g1, 16, %g7                ! IEU0
        andcc %g7, %o4, %g0             ! IEU1 Group
        be,pn %icc, 54f                 ! CTI
        andcc %g7, 0xff, %g0            ! IEU1 Group
        be,pn %icc, 55f                 ! CTI
        andcc %g1, %o4, %g0             ! IEU1 Group
        be,pn %icc, 56f                 ! CTI
        andcc %g1, 0xff, %g0            ! IEU1 Group
        be,a,pn %icc, 57f               ! CTI
        sub %o0, %o3, %o0               ! IEU0
2:      cmp %o0, %g3                    ! IEU1 Group
        bl,a,pt %xcc, 50b               ! CTI
62:     ldxa [%o1] %asi, %g1            ! Load
        retl                            ! CTI Group
        mov %o2, %o0                    ! IEU0
50:     sub %o0, %o3, %o0
        retl
        sub %o0, 8, %o0
51:     sub %o0, %o3, %o0
        retl
        sub %o0, 7, %o0
52:     sub %o0, %o3, %o0
        retl
        sub %o0, 6, %o0
53:     sub %o0, %o3, %o0
        retl
        sub %o0, 5, %o0
54:     sub %o0, %o3, %o0
        retl
        sub %o0, 4, %o0
55:     sub %o0, %o3, %o0
        retl
        sub %o0, 3, %o0
56:     sub %o0, %o3, %o0
        retl
        sub %o0, 2, %o0
57:     retl
        sub %o0, 1, %o0
30:     brlez,pn %o2, 3f
        sub %g0, %o2, %o3
        add %o0, %o2, %o0
63:     lduba [%o1] %asi, %o4
1:      add %o1, 1, %o1
        brz,pn %o4, 2f
        stb %o4, [%o0 + %o3]
        addcc %o3, 1, %o3
        bne,pt %xcc, 1b
64:     lduba [%o1] %asi, %o4
3:      retl
        mov %o2, %o0
2:      retl
        add %o2, %o3, %o0
ENDPROC(__strncpy_from_user)

        .section __ex_table,"a"
        .align 4
        .word 60b, __retl_efault
        .word 61b, __retl_efault
        .word 62b, __retl_efault
        .word 63b, __retl_efault
        .word 64b, __retl_efault
        .previous
@@ -1,4 +1,5 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>

void copy_from_user_overflow(void)
@@ -467,33 +467,6 @@ void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
        flush_tlb_all();
}

/*
 * On the SRMMU we do not have the problems with limited tlb entries
 * for mapping kernel pages, so we just take things from the free page
 * pool. As a side effect we are putting a little too much pressure
 * on the gfp() subsystem. This setup also makes the logic of the
 * iommu mapping code a lot easier as we can transparently handle
 * mappings on the kernel stack without any special code.
 */
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
        struct thread_info *ret;

        ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
                                                     THREAD_INFO_ORDER);
#ifdef CONFIG_DEBUG_STACK_USAGE
        if (ret)
                memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
#endif /* DEBUG_STACK_USAGE */

        return ret;
}

void free_thread_info(struct thread_info *ti)
{
        free_pages((unsigned long)ti, THREAD_INFO_ORDER);
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);