Merge branch 'parisc-4.11-3' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux into uaccess.parisc

Al Viro
2017-04-02 10:33:48 -04:00
1124 changed files with 16598 additions and 8178 deletions

arch/parisc/include/asm/cacheflush.h

@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
 
-/* vmap range flushes and invalidates. Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-
-	flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-	void *cursor = vaddr;
-
-	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
-		struct page *page = vmalloc_to_page(cursor);
-
-		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-			flush_kernel_dcache_page(page);
-	}
-	flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
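
Note: the inline versions removed above are replaced by out-of-line implementations, so only the declarations remain in the header and the flush policy can change without touching it; the driver-visible interface is unchanged. A minimal, hypothetical sketch of how these two helpers are typically used around device DMA to a vmalloc()/vmap() buffer (the function and variable names are made up for illustration):

#include <linux/vmalloc.h>
#include <asm/cacheflush.h>

/* Hypothetical helper: hand a vmalloc()'ed buffer to a device for DMA. */
static void example_dma_roundtrip(void *vbuf, int len)
{
	/* Write dirty cache lines back so the device reads current data. */
	flush_kernel_vmap_range(vbuf, len);

	/* ... start the transfer and wait for the device to finish ... */

	/* Discard cache lines for the range so the CPU sees what the
	 * device wrote rather than stale cached data. */
	invalidate_kernel_vmap_range(vbuf, len);
}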

arch/parisc/include/asm/uaccess.h

@@ -27,7 +27,8 @@
  * that put_user is the same as __put_user, etc.
  */
 
-#define access_ok(type, uaddr, size) (1)
+#define access_ok(type, uaddr, size)	\
+	( (uaddr) == (uaddr) )
 
 #define put_user __put_user
 #define get_user __get_user
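
Note on the access_ok() change: parisc reaches user space through a separate space register, so the check can legitimately always succeed. The old (1) body ignored its arguments, which lets gcc flag an address variable at a call site as set but never used; the new ( (uaddr) == (uaddr) ) body is still always true but evaluates its argument. A hypothetical caller illustrating the warning the old form could trigger (names invented for the example):

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical caller: with access_ok() expanding to (1), "addr" is set but
 * never used as far as gcc can see, so -Wunused-but-set-variable may fire.
 * With the new definition the macro references its argument instead. */
static int example_check_user_range(const void __user *ptr, unsigned long len)
{
	unsigned long addr = (unsigned long)ptr;

	if (!access_ok(VERIFY_READ, addr, len))
		return -EFAULT;
	return 0;
}
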
@@ -58,6 +59,15 @@ struct exception_table_entry {
".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
".previous\n"
/*
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
* (with lowest bit set) for which the fault handler in fixup_exception() will
* load -EFAULT into %r8 for a read or write fault, and zeroes the target
* register in case of a read fault in get_user().
*/
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
/*
* The page fault handler stores, in a per-cpu area, the following information
* if a fixup routine is available.
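
The comment above describes what the fault handler does with the low flag bit. A simplified sketch of that consumer side (an approximation of the fixup_exception() logic in arch/parisc/mm/fault.c, not the verbatim code; parisc_acctyp() classifies the faulting instruction as a read or write):

/* Simplified sketch: how a fixup entry created by
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() might be handled. */
static void example_apply_efault_fixup(struct pt_regs *regs,
				       const struct exception_table_entry *fix)
{
	if (fix->fixup & 1) {
		regs->gr[8] = -EFAULT;	/* %r8 holds __gu_err / __pu_err */

		/* For a faulting load (get_user), zero the destination
		 * register so the caller never sees stale data. */
		if (parisc_acctyp(0, regs->iir) == VM_READ) {
			int treg = regs->iir & 0x1f;
			regs->gr[treg] = 0;
		}
	}

	/* Continue at the fixup target, masking off the flag bit. */
	regs->iaoq[0] = ((unsigned long)&fix->fixup + fix->fixup) & ~3;
	regs->iaoq[1] = regs->iaoq[0] + 4;
}
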
@@ -85,7 +95,7 @@ struct exception_data {
 #define __get_user(x, ptr)				\
 ({							\
 	register long __gu_err __asm__ ("r8") = 0;	\
-	register long __gu_val __asm__ ("r9") = 0;	\
+	register long __gu_val;				\
 							\
 	load_sr2();					\
 	switch (sizeof(*(ptr))) {			\
@@ -101,22 +111,23 @@ struct exception_data {
 })
 
 #define __get_user_asm(ldx, ptr)			\
-	__asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t"	\
-		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
+	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
+		"9:\n"					\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
-		: "r"(ptr), "1"(__gu_err)		\
-		: "r1");
+		: "r"(ptr), "1"(__gu_err));
 
 #if !defined(CONFIG_64BIT)
 
 #define __get_user_asm64(ptr)				\
-	__asm__("\n1:\tldw 0(%%sr2,%2),%0"		\
-		"\n2:\tldw 4(%%sr2,%2),%R0\n\t"		\
-		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
-		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
+	__asm__("   copy %%r0,%R0\n"			\
+		"1: ldw 0(%%sr2,%2),%0\n"		\
+		"2: ldw 4(%%sr2,%2),%R0\n"		\
+		"9:\n"					\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
-		: "r"(ptr), "1"(__gu_err)		\
-		: "r1");
+		: "r"(ptr), "1"(__gu_err));
 
 #endif /* !defined(CONFIG_64BIT) */
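
Two consequences of the get_user() rewrite are visible above. First, __gu_val no longer has to live in a fixed register and %r1 no longer needs to be clobbered, because the removed fixup_get_user_skip_* stubs were assembly that ran behind gcc's back, whereas the new fixup work happens in the C fault handler. Second, the 64-bit variant first zeroes the second half of the destination pair with copy %%r0,%R0, since the fault fixup can only clear the register named in the faulting load. Callers are unaffected; a minimal, hypothetical user of get_user():

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical caller: read one int from user space.  On a bad address the
 * exception table entries above make get_user() return -EFAULT and leave
 * the destination zeroed. */
static long example_read_user_int(const int __user *uptr, int *out)
{
	int val;
	long err = get_user(val, uptr);

	if (err)
		return err;	/* -EFAULT from the fixup path */

	*out = val;
	return 0;
}
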
@@ -142,32 +153,31 @@ struct exception_data {
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
* instead of writing. This is because they do not write to any memory
* gcc knows about, so there are no aliasing issues. These macros must
* also be aware that "fixup_put_user_skip_[12]" are executed in the
* context of the fault, and any registers used there must be listed
* as clobbers. In this case only "r1" is used by the current routines.
* r8/r9 are already listed as err/val.
* also be aware that fixups are executed in the context of the fault,
* and any registers used there must be listed as clobbers.
* r8 is already listed as err.
*/
#define __put_user_asm(stx, x, ptr) \
__asm__ __volatile__ ( \
"\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
"1: " stx " %2,0(%%sr2,%1)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err) \
: "r1")
: "r"(ptr), "r"(x), "0"(__pu_err))
#if !defined(CONFIG_64BIT)
#define __put_user_asm64(__val, ptr) do { \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%%sr2,%1)" \
"\n2:\tstw %R2,4(%%sr2,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
"1: stw %2,0(%%sr2,%1)\n" \
"2: stw %R2,4(%%sr2,%1)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__pu_err) \
: "r"(ptr), "r"(__val), "0"(__pu_err) \
: "r1"); \
: "r"(ptr), "r"(__val), "0"(__pu_err)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
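
As the updated comment says, the store variants still tell gcc only that they read memory, and with the assembly fixup stubs gone no extra register clobbers are needed; %r8 (the error register) is the only fixed register left in the contract. A matching, hypothetical write-side caller:

#include <linux/uaccess.h>

/* Hypothetical caller: store a status word to user space.  put_user()
 * expands to the __put_user_asm()/__put_user_asm64() paths above and
 * returns 0 on success or -EFAULT if the store faults. */
static long example_write_user_status(unsigned int __user *uptr,
				      unsigned int status)
{
	return put_user(status, uptr);
}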