Merge 4.9-rc3 into char-misc-next
We need the binder patches in here to build on, so that other submitted patches apply properly.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

@@ -326,6 +326,7 @@ struct pci_dev;
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
 bool acpi_isa_irq_available(int irq);
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);
 
 extern int ec_read(u8 addr, u8 *val);

@@ -785,7 +785,7 @@ extern struct of_device_id __clk_of_table;
  * routines, one at of_clk_init(), and one at platform device probe
  */
 #define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
-	static void name##_of_clk_init_driver(struct device_node *np) \
+	static void __init name##_of_clk_init_driver(struct device_node *np) \
 	{ \
 		of_node_clear_flag(np, OF_POPULATED); \
 		fn(np); \

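For orientation, CLK_OF_DECLARE_DRIVER() is used by clock providers that need an early registration hook from of_clk_init() but still want a platform device probed for the same node later, which is why the generated wrapper clears OF_POPULATED. A minimal sketch of a user, with the driver name, compatible string and callback invented for illustration:

#include <linux/clk-provider.h>
#include <linux/of.h>

/* Illustrative early-init callback; runs from of_clk_init() before the
 * platform bus is available, so only register what is needed that early. */
static void __init foo_clk_of_init(struct device_node *np)
{
	/* register the critical clocks described by this node */
}

/* The wrapper generated by the macro clears OF_POPULATED, so a platform
 * driver can still bind to the same node later for the remaining clocks. */
CLK_OF_DECLARE_DRIVER(foo_clk, "vendor,foo-clk", foo_clk_of_init);
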
@@ -141,4 +141,26 @@ enum {
 void *memremap(resource_size_t offset, size_t size, unsigned long flags);
 void memunmap(void *addr);
 
+/*
+ * On x86 PAT systems we have memory tracking that keeps track of
+ * the allowed mappings on memory ranges. This tracking works for
+ * all the in-kernel mapping APIs (ioremap*), but where the user
+ * wishes to map a range from a physical device into user memory
+ * the tracking won't be updated. This API is to be used by
+ * drivers which remap physical device pages into userspace,
+ * and wants to make sure they are mapped WC and not UC.
+ */
+#ifndef arch_io_reserve_memtype_wc
+static inline int arch_io_reserve_memtype_wc(resource_size_t base,
+					     resource_size_t size)
+{
+	return 0;
+}
+
+static inline void arch_io_free_memtype_wc(resource_size_t base,
+					    resource_size_t size)
+{
+}
+#endif
+
 #endif /* _LINUX_IO_H */

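The comment block spells out the intended pairing: a driver that exposes device memory to userspace as write-combining reserves the memtype first so the PAT tracking stays consistent, and releases it again on teardown. A minimal sketch under that assumption (the helper names and error handling are illustrative, not part of this commit):

#include <linux/io.h>

/* Illustrative only: reserve the WC memtype for an aperture before
 * mapping it, and release it again when unmapping. */
static void __iomem *fb_map_wc(resource_size_t base, resource_size_t size)
{
	void __iomem *vaddr;

	if (arch_io_reserve_memtype_wc(base, size))
		return NULL;	/* conflicting memtype already registered */

	vaddr = ioremap_wc(base, size);
	if (!vaddr)
		arch_io_free_memtype_wc(base, size);
	return vaddr;
}

static void fb_unmap_wc(void __iomem *vaddr, resource_size_t base,
			resource_size_t size)
{
	iounmap(vaddr);
	arch_io_free_memtype_wc(base, size);
}
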
@@ -19,11 +19,15 @@ struct vm_fault;
 #define IOMAP_UNWRITTEN	0x04	/* blocks allocated @blkno in unwritten state */
 
 /*
- * Flags for iomap mappings:
+ * Flags for all iomap mappings:
  */
-#define IOMAP_F_MERGED	0x01	/* contains multiple blocks/extents */
-#define IOMAP_F_SHARED	0x02	/* block shared with another file */
-#define IOMAP_F_NEW	0x04	/* blocks have been newly allocated */
+#define IOMAP_F_NEW	0x01	/* blocks have been newly allocated */
+
+/*
+ * Flags that only need to be reported for IOMAP_REPORT requests:
+ */
+#define IOMAP_F_MERGED	0x10	/* contains multiple blocks/extents */
+#define IOMAP_F_SHARED	0x20	/* block shared with another file */
 
 /*
  * Magic value for blkno:

@@ -42,8 +46,9 @@ struct iomap {
 /*
  * Flags for iomap_begin / iomap_end. No flag implies a read.
  */
-#define IOMAP_WRITE		(1 << 0)
-#define IOMAP_ZERO		(1 << 1)
+#define IOMAP_WRITE		(1 << 0) /* writing, must allocate blocks */
+#define IOMAP_ZERO		(1 << 1) /* zeroing operation, may skip holes */
+#define IOMAP_REPORT		(1 << 2) /* report extent status, e.g. FIEMAP */
 
 struct iomap_ops {
 	/*

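As the comments now state, IOMAP_F_MERGED and IOMAP_F_SHARED only need to be reported for IOMAP_REPORT requests such as FIEMAP. A rough sketch of the kind of translation a reporting consumer would do; the helper name is hypothetical:

#include <linux/iomap.h>
#include <linux/fiemap.h>

/* Hypothetical helper: translate the reporting-only iomap flags into
 * the corresponding FIEMAP extent flags. */
static u32 iomap_report_flags_to_fiemap(u16 iomap_flags)
{
	u32 fiemap_flags = 0;

	if (iomap_flags & IOMAP_F_MERGED)
		fiemap_flags |= FIEMAP_EXTENT_MERGED;
	if (iomap_flags & IOMAP_F_SHARED)
		fiemap_flags |= FIEMAP_EXTENT_SHARED;
	return fiemap_flags;
}
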
@@ -31,7 +31,6 @@
  * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
  * the last step cherry picks the 2nd arg, we get a zero.
  */
-#define config_enabled(cfg)		___is_defined(cfg)
 #define __is_defined(x)			___is_defined(x)
 #define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
 #define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

@@ -41,13 +40,13 @@
  * otherwise. For boolean options, this is equivalent to
  * IS_ENABLED(CONFIG_FOO).
  */
-#define IS_BUILTIN(option) config_enabled(option)
+#define IS_BUILTIN(option) __is_defined(option)
 
 /*
  * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
  * otherwise.
  */
-#define IS_MODULE(option) config_enabled(option##_MODULE)
+#define IS_MODULE(option) __is_defined(option##_MODULE)
 
 /*
  * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled

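Switching IS_BUILTIN()/IS_MODULE() from config_enabled() to __is_defined() is internal plumbing; callers keep using the helpers exactly as before. A small usage reminder, with CONFIG_FOO standing in for a real option:

#include <linux/kconfig.h>
#include <linux/printk.h>

/* IS_ENABLED() and friends work in ordinary C conditionals, so dead
 * branches are still type-checked and then discarded by the compiler. */
static void foo_report_config(void)
{
	if (IS_ENABLED(CONFIG_FOO))	/* 'y' or 'm' */
		pr_info("FOO support compiled in\n");

	if (IS_BUILTIN(CONFIG_FOO))	/* 'y' only */
		pr_info("FOO is built in\n");

	if (IS_MODULE(CONFIG_FOO))	/* 'm' only */
		pr_info("FOO is modular\n");
}
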
@@ -1271,10 +1271,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 		void *buf, int len, unsigned int gup_flags);
 
-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, unsigned long nr_pages,
-		unsigned int foll_flags, struct page **pages,
-		struct vm_area_struct **vmas, int *nonblocking);
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, unsigned long nr_pages,
 		unsigned int gup_flags, struct page **pages,

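With the unused __get_user_pages() declaration dropped, get_user_pages_remote() remains the interface for pinning pages in another process's address space. A hedged sketch of a caller, assuming the 4.9-era signature whose final parameter (not visible above) is a struct vm_area_struct **vmas, and with the helper name invented for illustration:

#include <linux/mm.h>
#include <linux/sched.h>

/* Illustrative: pin one page of a remote task's address space for read. */
static struct page *pin_remote_page(struct task_struct *tsk,
				    struct mm_struct *mm, unsigned long addr)
{
	struct page *page = NULL;
	long ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages_remote(tsk, mm, addr & PAGE_MASK, 1,
				    0 /* gup_flags: read-only */,
				    &page, NULL);
	up_read(&mm->mmap_sem);

	return ret == 1 ? page : NULL;	/* caller must put_page() later */
}
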
@@ -440,33 +440,7 @@ struct zone {
 	seqlock_t		span_seqlock;
 #endif
 
-	/*
-	 * wait_table		-- the array holding the hash table
-	 * wait_table_hash_nr_entries	-- the size of the hash table array
-	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
-	 *
-	 * The purpose of all these is to keep track of the people
-	 * waiting for a page to become available and make them
-	 * runnable again when possible. The trouble is that this
-	 * consumes a lot of space, especially when so few things
-	 * wait on pages at a given time. So instead of using
-	 * per-page waitqueues, we use a waitqueue hash table.
-	 *
-	 * The bucket discipline is to sleep on the same queue when
-	 * colliding and wake all in that wait queue when removing.
-	 * When something wakes, it must check to be sure its page is
-	 * truly available, a la thundering herd. The cost of a
-	 * collision is great, but given the expected load of the
-	 * table, they should be so rare as to be outweighed by the
-	 * benefits from the saved space.
-	 *
-	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
-	 * primary users of these fields, and in mm/page_alloc.c
-	 * free_area_init_core() performs the initialization of them.
-	 */
-	wait_queue_head_t	*wait_table;
-	unsigned long		wait_table_hash_nr_entries;
-	unsigned long		wait_table_bits;
+	int initialized;
 
 	/* Write-intensive fields used from the page allocator */
 	ZONE_PADDING(_pad1_)

@@ -546,7 +520,7 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
 
 static inline bool zone_is_initialized(struct zone *zone)
 {
-	return !!zone->wait_table;
+	return zone->initialized;
 }
 
 static inline bool zone_is_empty(struct zone *zone)

@@ -1257,6 +1257,7 @@ extern u64 perf_swevent_set_period(struct perf_event *event);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_disable_local(struct perf_event *event);
+extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *