Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
@@ -304,8 +304,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
OSC_PCI_EXPRESS_PME_CONTROL | \
OSC_PCI_EXPRESS_AER_CONTROL | \
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
u32 *mask, u32 req);
extern void acpi_early_init(void);
#else /* !CONFIG_ACPI */
@@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
int cgroup_attach_task_current_cg(struct task_struct *);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
{
return cgroup_attach_task_all(current, tsk);
}
/*
* CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
}
/* No cgroups - nothing to do */
static inline int cgroup_attach_task_all(struct task_struct *from,
struct task_struct *t)
{
return 0;
}
static inline int cgroup_attach_task_current_cg(struct task_struct *t)
{
return 0;
@@ -136,6 +136,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
extern int elevator_init(struct request_queue *, char *);
extern void elevator_exit(struct elevator_queue *);
extern int elevator_change(struct request_queue *, const char *);
extern int elv_rq_merge_ok(struct request *, struct bio *);
/*
@@ -65,14 +65,14 @@
FAN_ALL_PERM_EVENTS |\
FAN_Q_OVERFLOW)
#define FANOTIFY_METADATA_VERSION 1
#define FANOTIFY_METADATA_VERSION 2
struct fanotify_event_metadata {
__u32 event_len;
__u32 vers;
__s32 fd;
__u64 mask;
__s64 pid;
__s32 fd;
__s32 pid;
} __attribute__ ((packed));
struct fanotify_response {
@@ -95,11 +95,4 @@ struct fanotify_response {
(long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \
(long)(meta)->event_len <= (long)(len))
#ifdef __KERNEL__
struct fanotify_wait {
struct fsnotify_event *event;
__s32 fd;
};
#endif /* __KERNEL__ */
#endif /* _LINUX_FANOTIFY_H */
@@ -156,6 +156,7 @@ struct fsnotify_group {
struct mutex access_mutex;
struct list_head access_list;
wait_queue_head_t access_waitq;
bool bypass_perm; /* protected by access_mutex */
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
int f_flags;
} fanotify_data;
@@ -13,6 +13,7 @@
#include <linux/errno.h>
struct device;
struct gpio_chip;
/*
* Some platforms don't support the GPIO programming interface.
@@ -63,6 +63,9 @@
* IRQ lines will appear. Similarly to gpio_base, the expander
* will create a block of irqs beginning at this number.
* This value is ignored if irq_summary is < 0.
* @reset_during_probe: If set to true, the driver will trigger a full
* reset of the chip at the beginning of the probe
* in order to place it in a known state.
*/
struct sx150x_platform_data {
unsigned gpio_base;
@@ -73,6 +76,7 @@ struct sx150x_platform_data {
u16 io_polarity;
int irq_summary;
unsigned irq_base;
bool reset_during_probe;
};
#endif /* __LINUX_I2C_SX150X_H */
include/linux/intel-gtt.h (new file, 20 lines)
@@ -0,0 +1,20 @@
/*
* Common Intel AGPGART and GTT definitions.
*/
#ifndef _INTEL_GTT_H
#define _INTEL_GTT_H
#include <linux/agp_backend.h>
/* This is for Intel only GTT controls.
*
* Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only
*/
#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
/* flag for GFDT type */
#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
#endif
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping)
}
/* Atomic map/unmap */
static inline void *
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset,
int slot)
@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
}
static inline void
io_mapping_unmap_atomic(void *vaddr, int slot)
io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
{
iounmap_atomic(vaddr, slot);
}
static inline void *
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
resource_size_t phys_addr;
@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
}
static inline void
io_mapping_unmap(void *vaddr)
io_mapping_unmap(void __iomem *vaddr)
{
iounmap(vaddr);
}
@@ -125,38 +125,38 @@ struct io_mapping;
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
return (struct io_mapping *) ioremap_wc(base, size);
return (struct io_mapping __force *) ioremap_wc(base, size);
}
static inline void
io_mapping_free(struct io_mapping *mapping)
{
iounmap(mapping);
iounmap((void __force __iomem *) mapping);
}
/* Atomic map/unmap */
static inline void *
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset,
int slot)
{
return ((char *) mapping) + offset;
return ((char __force __iomem *) mapping) + offset;
}
static inline void
io_mapping_unmap_atomic(void *vaddr, int slot)
io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
{
}
/* Non-atomic map/unmap */
static inline void *
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
return ((char *) mapping) + offset;
return ((char __force __iomem *) mapping) + offset;
}
static inline void
io_mapping_unmap(void *vaddr)
io_mapping_unmap(void __iomem *vaddr)
{
}
@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val)
*/
#define kfifo_reset(fifo) \
(void)({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof((fifo) + 1) __tmp = (fifo); \
__tmp->kfifo.in = __tmp->kfifo.out = 0; \
})
@@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val)
*/
#define kfifo_reset_out(fifo) \
(void)({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof((fifo) + 1) __tmp = (fifo); \
__tmp->kfifo.out = __tmp->kfifo.in; \
})
@@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val)
*/
#define kfifo_len(fifo) \
({ \
typeof(fifo + 1) __tmpl = (fifo); \
typeof((fifo) + 1) __tmpl = (fifo); \
__tmpl->kfifo.in - __tmpl->kfifo.out; \
})
@@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val)
*/
#define kfifo_is_empty(fifo) \
({ \
typeof(fifo + 1) __tmpq = (fifo); \
typeof((fifo) + 1) __tmpq = (fifo); \
__tmpq->kfifo.in == __tmpq->kfifo.out; \
})
@@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val)
*/
#define kfifo_is_full(fifo) \
({ \
typeof(fifo + 1) __tmpq = (fifo); \
typeof((fifo) + 1) __tmpq = (fifo); \
kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
})
@@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val)
#define kfifo_avail(fifo) \
__kfifo_must_check_helper( \
({ \
typeof(fifo + 1) __tmpq = (fifo); \
typeof((fifo) + 1) __tmpq = (fifo); \
const size_t __recsize = sizeof(*__tmpq->rectype); \
unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
(__recsize) ? ((__avail <= __recsize) ? 0 : \
@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_skip(fifo) \
(void)({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof((fifo) + 1) __tmp = (fifo); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
if (__recsize) \
@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \
#define kfifo_peek_len(fifo) \
__kfifo_must_check_helper( \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof((fifo) + 1) __tmp = (fifo); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
(!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \
#define kfifo_alloc(fifo, size, gfp_mask) \
__kfifo_must_check_helper( \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof((fifo) + 1) __tmp = (fifo); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
__is_kfifo_ptr(__tmp) ? \
__kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_free(fifo) \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof((fifo) + 1) __tmp = (fifo); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
if (__is_kfifo_ptr(__tmp)) \
__kfifo_free(__kfifo); \
@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_init(fifo, buffer, size) \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof((fifo) + 1) __tmp = (fifo); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
__is_kfifo_ptr(__tmp) ? \
__kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \
*/
#define kfifo_put(fifo, val) \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof(val + 1) __val = (val); \
typeof((fifo) + 1) __tmp = (fifo); \
typeof((val) + 1) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \
#define kfifo_get(fifo, val) \
__kfifo_must_check_helper( \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof(val + 1) __val = (val); \
typeof((fifo) + 1) __tmp = (fifo); \
typeof((val) + 1) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \
#define kfifo_peek(fifo, val) \
__kfifo_must_check_helper( \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof(val + 1) __val = (val); \
typeof((fifo) + 1) __tmp = (fifo); \
typeof((val) + 1) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \
*/
#define kfifo_in(fifo, buf, n) \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof(buf + 1) __buf = (buf); \
typeof((fifo) + 1) __tmp = (fifo); \
typeof((buf) + 1) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \
#define kfifo_out(fifo, buf, n) \
__kfifo_must_check_helper( \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof(buf + 1) __buf = (buf); \
typeof((fifo) + 1) __tmp = (fifo); \
typeof((buf) + 1) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \
#define kfifo_from_user(fifo, from, len, copied) \
__kfifo_must_check_helper( \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof((fifo) + 1) __tmp = (fifo); \
const void __user *__from = (from); \
unsigned int __len = (len); \
unsigned int *__copied = (copied); \
@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \
#define kfifo_to_user(fifo, to, len, copied) \
__kfifo_must_check_helper( \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof((fifo) + 1) __tmp = (fifo); \
void __user *__to = (to); \
unsigned int __len = (len); \
unsigned int *__copied = (copied); \
@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof((fifo) + 1) __tmp = (fifo); \
struct scatterlist *__sgl = (sgl); \
int __nents = (nents); \
unsigned int __len = (len); \
@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_dma_in_finish(fifo, len) \
(void)({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof((fifo) + 1) __tmp = (fifo); \
unsigned int __len = (len); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof((fifo) + 1) __tmp = (fifo); \
struct scatterlist *__sgl = (sgl); \
int __nents = (nents); \
unsigned int __len = (len); \
@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_dma_out_finish(fifo, len) \
(void)({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof((fifo) + 1) __tmp = (fifo); \
unsigned int __len = (len); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \
#define kfifo_out_peek(fifo, buf, n) \
__kfifo_must_check_helper( \
({ \
typeof(fifo + 1) __tmp = (fifo); \
typeof(buf + 1) __buf = (buf); \
typeof((fifo) + 1) __tmp = (fifo); \
typeof((buf) + 1) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -16,6 +16,9 @@
struct stable_node;
struct mem_cgroup;
struct page *ksm_does_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address);
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
* but what if the vma was unmerged while the page was swapped out?
*/
struct page *ksm_does_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address);
static inline struct page *ksm_might_need_to_copy(struct page *page,
static inline int ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
struct anon_vma *anon_vma = page_anon_vma(page);
if (!anon_vma ||
(anon_vma->root == vma->anon_vma->root &&
page->index == linear_page_index(vma, address)))
return page;
return ksm_does_need_to_copy(page, vma, address);
return anon_vma &&
(anon_vma->root != vma->anon_vma->root ||
page->index != linear_page_index(vma, address));
}
int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
return 0;
}
static inline struct page *ksm_might_need_to_copy(struct page *page,
static inline int ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
return page;
return 0;
}
static inline int page_referenced_ksm(struct page *page,
@@ -150,7 +150,7 @@
int i; \
preempt_disable(); \
rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
for_each_online_cpu(i) { \
for_each_possible_cpu(i) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_lock(lock); \
@@ -161,7 +161,7 @@
void name##_global_unlock(void) { \
int i; \
rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
for_each_online_cpu(i) { \
for_each_possible_cpu(i) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_unlock(lock); \
@@ -335,6 +335,7 @@ enum {
ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
ATA_EHI_QUIET = (1 << 3), /* be quiet */
ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */
ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */
ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */
@@ -723,6 +724,7 @@ struct ata_port {
struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
u8 ctl; /* cache of ATA control register */
u8 last_ctl; /* Cache last written value */
struct ata_link* sff_pio_task_link; /* link currently used */
struct delayed_work sff_pio_task;
#ifdef CONFIG_ATA_BMDMA
struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */
@@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap);
extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq);
extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
extern unsigned int ata_sff_port_intr(struct ata_port *ap,
@@ -33,6 +33,7 @@
#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
#define MPT_MINOR 220
#define MPT2SAS_MINOR 221
#define UINPUT_MINOR 223
#define HPET_MINOR 228
#define FUSE_MINOR 229
#define KVM_MINOR 232
@@ -864,6 +864,12 @@ int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
/* Is the vma a continuation of the stack vma above it? */
static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
{
return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len);
@@ -1363,7 +1369,15 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
return __pgprot(0);
}
#endif
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
@@ -38,6 +38,8 @@
* [8:0] Byte/block count
*/
#define R4_MEMORY_PRESENT (1 << 27)
/*
SDIO status in R5
Type
@@ -283,6 +283,13 @@ struct zone {
/* zone watermarks, access with *_wmark_pages(zone) macros */
unsigned long watermark[NR_WMARK];
/*
* When free pages are below this point, additional steps are taken
* when reading the number of free pages to avoid per-cpu counter
* drift allowing watermarks to be breached
*/
unsigned long percpu_drift_mark;
/*
* We don't know if the memory that we're going to allocate will be freeable
* or/and it will be released eventually, so to avoid totally wasting several
@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
#ifdef CONFIG_SMP
unsigned long zone_nr_free_pages(struct zone *zone);
#else
#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
#endif /* CONFIG_SMP */
/*
* The "priority" of VM scanning is how much of the queues we will scan in one
* go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -78,6 +78,14 @@ struct mutex_waiter {
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
/**
* mutex_init - initialize the mutex
* @mutex: the mutex to be initialized
*
* Initialize the mutex to unlocked state.
*
* It is not allowed to initialize an already locked mutex.
*/
# define mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
@@ -1214,6 +1214,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
unsigned int devfn)
{ return NULL; }
static inline int pci_domain_nr(struct pci_bus *bus)
{ return 0; }
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
#define dev_num_vf(d) (0)
@@ -2300,6 +2300,8 @@
#define PCI_DEVICE_ID_P2010 0x0079
#define PCI_DEVICE_ID_P1020E 0x0100
#define PCI_DEVICE_ID_P1020 0x0101
#define PCI_DEVICE_ID_P1021E 0x0102
#define PCI_DEVICE_ID_P1021 0x0103
#define PCI_DEVICE_ID_P1011E 0x0108
#define PCI_DEVICE_ID_P1011 0x0109
#define PCI_DEVICE_ID_P1022E 0x0110
@@ -149,7 +149,7 @@ extern void __init percpu_init_late(void);
#else /* CONFIG_SMP */
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
/* can't distinguish from other static vars, always false */
static inline bool is_kernel_percpu_address(unsigned long addr)
@@ -274,8 +274,14 @@ static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
int ret;
ret = dquot_alloc_space_nodirty(inode, nr);
if (!ret)
mark_inode_dirty_sync(inode);
if (!ret) {
/*
* Mark inode fully dirty. Since we are allocating blocks, inode
* would become fully dirty soon anyway and it reportedly
* reduces inode_lock contention.
*/
mark_inode_dirty(inode);
}
return ret;
}
@@ -26,6 +26,9 @@ struct semaphore {
.wait_list = LIST_HEAD_INIT((name).wait_list), \
}
#define DEFINE_SEMAPHORE(name) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
#define DECLARE_MUTEX(name) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
@@ -77,8 +77,7 @@ struct serial_struct {
#define PORT_16654 11
#define PORT_16850 12
#define PORT_RSA 13 /* RSA-DV II/S card */
#define PORT_U6_16550A 14
#define PORT_MAX 14
#define PORT_MAX 13
#define SERIAL_IO_PORT 0
#define SERIAL_IO_HUB6 1
@@ -44,7 +44,8 @@
#define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */
#define PORT_OCTEON 17 /* Cavium OCTEON internal UART */
#define PORT_AR7 18 /* Texas Instruments AR7 internal UART */
#define PORT_MAX_8250 18 /* max port ID */
#define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */
#define PORT_MAX_8250 19 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
@@ -465,7 +466,7 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
#ifdef SUPPORT_SYSRQ
if (port->sysrq) {
if (ch && time_before(jiffies, port->sysrq)) {
handle_sysrq(ch, port->state->port.tty);
handle_sysrq(ch);
port->sysrq = 0;
return 1;
}
@@ -14,7 +14,9 @@
#define SPI_MODE_OFFSET 6
#define SPI_SCPH_OFFSET 6
#define SPI_SCOL_OFFSET 7
#define SPI_TMOD_OFFSET 8
#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET)
#define SPI_TMOD_TR 0x0 /* xmit & recv */
#define SPI_TMOD_TO 0x1 /* xmit only */
#define SPI_TMOD_RO 0x2 /* recv only */
@@ -19,6 +19,7 @@ struct bio;
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_PRIO_SHIFT 0
#define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */
static inline int current_is_kswapd(void)
{
@@ -142,7 +143,7 @@ struct swap_extent {
enum {
SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */
SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
SWP_DISCARDABLE = (1 << 2), /* swapon+blkdev support discard */
SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
@@ -315,6 +316,7 @@ extern long nr_swap_pages;
extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
#ifdef CONFIG_HIBERNATION
void hibernation_freeze_swap(void);
void hibernation_thaw_swap(void);
swp_entry_t get_swap_for_hibernation(int type);
void swap_free_for_hibernation(swp_entry_t val);
#endif
/* linux/mm/thrash.c */
extern struct mm_struct *swap_token_mm;
extern void grab_swap_token(struct mm_struct *);
@@ -15,9 +15,7 @@
#define _LINUX_SYSRQ_H
#include <linux/errno.h>
struct pt_regs;
struct tty_struct;
#include <linux/types.h>
/* Possible values of bitmask for enabling sysrq functions */
/* 0x0001 is reserved for enable everything */
@@ -31,7 +29,7 @@ struct tty_struct;
#define SYSRQ_ENABLE_RTNICE 0x0100
struct sysrq_key_op {
void (*handler)(int, struct tty_struct *);
void (*handler)(int);
char *help_msg;
char *action_msg;
int enable_mask;
@@ -44,8 +42,8 @@ struct sysrq_key_op {
* are available -- else NULL's).
*/
void handle_sysrq(int key, struct tty_struct *tty);
void __handle_sysrq(int key, struct tty_struct *tty, int check_mask);
void handle_sysrq(int key);
void __handle_sysrq(int key, bool check_mask);
int register_sysrq_key(int key, struct sysrq_key_op *op);
int unregister_sysrq_key(int key, struct sysrq_key_op *op);
struct sysrq_key_op *__sysrq_get_key_op(int key);
@@ -54,7 +52,11 @@ int sysrq_toggle_support(int enable_mask);
#else
static inline void handle_sysrq(int key, struct tty_struct *tty)
static inline void handle_sysrq(int key)
{
}
static inline void __handle_sysrq(int key, bool check_mask)
{
}
@@ -37,7 +37,6 @@
#define UINPUT_VERSION 3
#ifdef __KERNEL__
#define UINPUT_MINOR 223
#define UINPUT_NAME "uinput"
#define UINPUT_BUFFER_SIZE 16
#define UINPUT_NUM_REQUESTS 16
@@ -342,8 +342,7 @@ extern int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
extern void usb_serial_generic_process_read_urb(struct urb *urb);
extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
void *dest, size_t size);
extern int usb_serial_handle_sysrq_char(struct tty_struct *tty,
struct usb_serial_port *port,
extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
unsigned int ch);
extern int usb_serial_handle_break(struct usb_serial_port *port);
@@ -93,8 +93,11 @@ extern void vga_set_legacy_decoding(struct pci_dev *pdev,
* Nested calls are supported (a per-resource counter is maintained)
*/
extern int vga_get(struct pci_dev *pdev, unsigned int rsrc,
int interruptible);
#if defined(CONFIG_VGA_ARB)
extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
#else
static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; }
#endif
/**
* vga_get_interruptible
@@ -131,7 +134,11 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev,
* are already locked by another card. It can be called in any context
*/
#if defined(CONFIG_VGA_ARB)
extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
#else
static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
#endif
/**
* vga_put - release lock on legacy VGA resources
@@ -146,7 +153,11 @@ extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
* released if the counter reaches 0.
*/
#if defined(CONFIG_VGA_ARB)
extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
#else
#define vga_put(pdev, rsrc)
#endif
/**
@@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone,
return x;
}
/*
* More accurate version that also considers the currently pending
* deltas. For that we need to loop over all cpus to find the current
* deltas. There is no synchronization so the result cannot be
* exactly accurate either.
*/
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
enum zone_stat_item item)
{
long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
int cpu;
for_each_online_cpu(cpu)
x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
if (x < 0)
x = 0;
#endif
return x;
}
extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);
@@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work);
enum {
WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
WORK_STRUCT_CWQ_BIT = 1, /* data points to cwq */
WORK_STRUCT_LINKED_BIT = 2, /* next work is linked to this one */
WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */
WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
WORK_STRUCT_STATIC_BIT = 3, /* static initializer (debugobjects) */
WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */
WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */
#else
WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */
WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
#endif
WORK_STRUCT_COLOR_BITS = 4,
WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT,
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -59,8 +61,8 @@ enum {
/*
* Reserve 7 bits off of cwq pointer w/ debugobjects turned
* off. This makes cwqs aligned to 128 bytes which isn't too
* excessive while allowing 15 workqueue flush colors.
* off. This makes cwqs aligned to 256 bytes and allows 15
* workqueue flush colors.
*/
WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
WORK_STRUCT_COLOR_BITS,
@@ -241,6 +243,8 @@ enum {
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */
WQ_DYING = 1 << 6, /* internal: workqueue is dying */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,