Merge tag 'v4.12-rc3' into next
Sync with mainline to bring in the platform driver changes that drop calls to sparse_keymap_free(), so that we can remove it for good.
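For context, the pulled-in change makes the keymap copy created by sparse_keymap_setup() managed memory, so drivers no longer need a matching sparse_keymap_free() in their error and remove paths. A rough sketch of the resulting driver pattern (the device and keymap here are invented for illustration, not taken from any real driver):

#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

/* Hypothetical keymap for an illustrative platform driver. */
static const struct key_entry example_keymap[] = {
	{ KE_KEY, 0x01, { KEY_PROG1 } },
	{ KE_KEY, 0x02, { KEY_PROG2 } },
	{ KE_END, 0 }
};

static int example_input_setup(struct device *parent)
{
	struct input_dev *input;
	int error;

	input = devm_input_allocate_device(parent);
	if (!input)
		return -ENOMEM;

	error = sparse_keymap_setup(input, example_keymap, NULL);
	if (error)
		return error;

	/* No sparse_keymap_free() needed on the error/remove path anymore. */
	return input_register_device(input);
}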
@@ -94,7 +94,8 @@ defaultimage-$(CONFIG_SH_7206_SOLUTION_ENGINE) := vmlinux
defaultimage-$(CONFIG_SH_7619_SOLUTION_ENGINE) := vmlinux

# Set some sensible Kbuild defaults
KBUILD_IMAGE := $(defaultimage-y)
boot := arch/sh/boot
KBUILD_IMAGE := $(boot)/$(defaultimage-y)

#
# Choosing incompatible machines durings configuration will result in
@@ -186,8 +187,6 @@ cpuincdir-y += cpu-common # Must be last
drivers-y += arch/sh/drivers/
drivers-$(CONFIG_OPROFILE) += arch/sh/oprofile/

boot := arch/sh/boot

cflags-y += $(foreach d, $(cpuincdir-y), -Iarch/sh/include/$(d)) \
	    $(foreach d, $(machdir-y), -Iarch/sh/include/$(d))

@@ -211,7 +210,7 @@ BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.xz uImage.lzo \
	       romImage
PHONY += $(BOOT_TARGETS)

all: $(KBUILD_IMAGE)
all: $(notdir $(KBUILD_IMAGE))

$(BOOT_TARGETS): vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
@@ -269,27 +269,6 @@ void __ref pcibios_report_status(unsigned int status_mask, int warn)
	}
}

int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
	enum pci_mmap_state mmap_state, int write_combine)
{
	/*
	 * I/O space can be accessed via normal processor loads and stores on
	 * this platform but for now we elect not to do this and portable
	 * drivers should not do this anyway.
	 */
	if (mmap_state == pci_mmap_io)
		return -EINVAL;

	/*
	 * Ignore write-combine; for now only return uncached mappings.
	 */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

#ifndef CONFIG_GENERIC_IOMAP

void __iomem *__pci_ioport_map(struct pci_dev *dev,
@@ -50,7 +50,7 @@ do { \
		"i" (sizeof(struct bug_entry))); \
} while (0)

#define __WARN_TAINT(taint) \
#define __WARN_FLAGS(flags) \
do { \
	__asm__ __volatile__ ( \
		"1:\t.short %O0\n" \
@@ -59,7 +59,7 @@ do { \
		: "n" (TRAPA_BUG_OPCODE), \
		  "i" (__FILE__), \
		  "i" (__LINE__), \
		  "i" (BUGFLAG_TAINT(taint)), \
		  "i" (BUGFLAG_WARNING|(flags)), \
		  "i" (sizeof(struct bug_entry))); \
} while (0)

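These two hunks follow the generic bug/warn rework that this merge brings in: the trap-based WARN macro no longer receives a bare taint value but a pre-composed flags word, and the architecture side only ORs in BUGFLAG_WARNING. A hedged illustration of what a caller can now express (the macro name below is invented; the real composition lives in the generic bug header, not here):

/*
 * Illustrative only, not quoted from asm-generic/bug.h: the caller builds
 * the bug_entry flags (taint value in the high bits, modifier bits such as
 * "warn once" in the low bits) and the arch macro above adds BUGFLAG_WARNING.
 */
#define EXAMPLE_WARN_ONCE_TAINT(taint) \
	__WARN_FLAGS(BUGFLAG_ONCE | BUGFLAG_TAINT(taint))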
10  arch/sh/include/asm/extable.h  Normal file
@@ -0,0 +1,10 @@
#ifndef __ASM_SH_EXTABLE_H
#define __ASM_SH_EXTABLE_H

#include <asm-generic/extable.h>

#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
#define ARCH_HAS_SEARCH_EXTABLE
#endif

#endif
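The new header simply pulls in the generic exception-table definitions; what that buys is visible further down, where asm/uaccess.h stops open-coding the same thing. Roughly (paraphrased from the generic header, not quoted verbatim):

/*
 * Paraphrase of what <asm-generic/extable.h> supplies: each table entry
 * pairs the address of an instruction that may fault with the address at
 * which execution should resume, and fixup_exception() consults the table
 * at fault time.
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};

struct pt_regs;
int fixup_exception(struct pt_regs *regs);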
@@ -66,8 +66,8 @@ extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM;
struct pci_dev;

#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
	enum pci_mmap_state mmap_state, int write_combine);
#define ARCH_GENERIC_PCI_MMAP_RESOURCE

extern void pcibios_set_master(struct pci_dev *dev);

/* Dynamic DMA mapping stuff.
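Defining ARCH_GENERIC_PCI_MMAP_RESOURCE opts sh into the PCI core's common resource-mmap path, which is why the pci_mmap_page_range() body was deleted from pci.c earlier in this diff. For a memory BAR on this platform the end result is essentially what the removed function did; a rough sketch mirroring that code rather than quoting the PCI core (the function name is invented):

#include <linux/mm.h>
#include <linux/pci.h>

/*
 * Sketch only: I/O-space mmap is still refused on sh, and memory BARs are
 * mapped uncached, just as in the pci_mmap_page_range() removed above.
 */
static int example_pci_mmap_mem(struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}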
@@ -1,12 +1,8 @@
#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <asm/segment.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1
#include <asm/extable.h>

#define __addr_ok(addr) \
	((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg)
@@ -112,19 +108,18 @@ extern __must_check long strnlen_user(const char __user *str, long n);
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user(to, (__force void *)from, n);
}

static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user((__force void *)to, from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

/*
 * Clear the area and return remaining number of bytes
@@ -144,55 +139,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
	__cl_size; \
})

static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long __copy_from = (unsigned long) from;
	__kernel_size_t __copy_size = (__kernel_size_t) n;

	if (__copy_size && __access_ok(__copy_from, __copy_size))
		__copy_size = __copy_user(to, from, __copy_size);

	if (unlikely(__copy_size))
		memset(to + (n - __copy_size), 0, __copy_size);

	return __copy_size;
}

static inline unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long __copy_to = (unsigned long) to;
	__kernel_size_t __copy_size = (__kernel_size_t) n;

	if (__copy_size && __access_ok(__copy_to, __copy_size))
		return __copy_user(to, from, __copy_size);

	return __copy_size;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};

#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
#define ARCH_HAS_SEARCH_EXTABLE
#endif

int fixup_exception(struct pt_regs *regs);

extern void *set_exception_table_vec(unsigned int vec, void *handler);

static inline void *set_exception_table_evt(unsigned int evt, void *handler)
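After this merge, sh only supplies raw_copy_{from,to}_user() plus the INLINE_COPY_* opt-ins; the access_ok() check and the zero-fill of the uncopied tail, which the deleted copy_from_user() above did by hand, now come from the generic copy_from_user(). A hedged sketch of that generic behaviour (the helper name is made up; the real code lives in the common uaccess layer):

#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Sketch of what the generic copy_from_user() adds on top of the arch's
 * raw_copy_from_user(): the address-range check first, then zeroing the
 * destination tail on a short copy so callers never see stale data.
 */
static inline unsigned long
example_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}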
@@ -1,25 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm

header-y += auxvec.h
header-y += byteorder.h
header-y += cachectl.h
header-y += cpu-features.h
header-y += hw_breakpoint.h
header-y += ioctls.h
header-y += posix_types.h
header-y += posix_types_32.h
header-y += posix_types_64.h
header-y += ptrace.h
header-y += ptrace_32.h
header-y += ptrace_64.h
header-y += setup.h
header-y += sigcontext.h
header-y += signal.h
header-y += sockios.h
header-y += stat.h
header-y += swab.h
header-y += types.h
header-y += unistd.h
header-y += unistd_32.h
header-y += unistd_64.h