Merge branch 'x86/urgent' into x86/asm, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar
2016-02-18 09:28:03 +01:00
1010 changed files with 11172 additions and 58848 deletions


@@ -682,9 +682,12 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
/*
* q->prep_rq_fn return values
*/
#define BLKPREP_OK 0 /* serve it */
#define BLKPREP_KILL 1 /* fatal error, kill */
#define BLKPREP_DEFER 2 /* leave on queue */
enum {
BLKPREP_OK, /* serve it */
BLKPREP_KILL, /* fatal error, kill, return -EIO */
BLKPREP_DEFER, /* leave on queue */
BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */
};
extern unsigned long blk_max_low_pfn, blk_max_pfn;
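
The new BLKPREP_INVALID value lets a queue's prep_rq_fn fail a request with -EREMOTEIO rather than -EIO. A minimal sketch of a prep function using the four return codes follows; the driver, its EXAMPLE_MAX_BYTES limit and the deferral helper are hypothetical, not part of this commit:

static int example_prep_rq_fn(struct request_queue *q, struct request *rq)
{
	if (!rq->bio)
		return BLKPREP_KILL;		/* fatal error: request fails with -EIO */
	if (blk_rq_bytes(rq) > EXAMPLE_MAX_BYTES)
		return BLKPREP_INVALID;		/* invalid command: fails with -EREMOTEIO */
	if (example_resources_busy(q))
		return BLKPREP_DEFER;		/* not ready yet: leave it on the queue */
	return BLKPREP_OK;			/* serve it */
}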


@@ -63,6 +63,18 @@
#define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
// duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY
#define CEPH_FEATURE_OSD_PROXY_FEATURES (1ULL<<49) /* overlap w/ above */
#define CEPH_FEATURE_MON_METADATA (1ULL<<50)
#define CEPH_FEATURE_OSD_BITWISE_HOBJ_SORT (1ULL<<51) /* can sort objs bitwise */
#define CEPH_FEATURE_OSD_PROXY_WRITE_FEATURES (1ULL<<52)
#define CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3 (1ULL<<53)
#define CEPH_FEATURE_OSD_HITSET_GMT (1ULL<<54)
#define CEPH_FEATURE_HAMMER_0_94_4 (1ULL<<55)
#define CEPH_FEATURE_NEW_OSDOP_ENCODING (1ULL<<56) /* New, v7 encoding */
#define CEPH_FEATURE_MON_STATEFUL_SUB (1ULL<<57) /* stateful mon subscription */
#define CEPH_FEATURE_MON_ROUTE_OSDMAP (1ULL<<57) /* peon sends osdmaps */
#define CEPH_FEATURE_CRUSH_TUNABLES5 (1ULL<<58) /* chooseleaf stable mode */
// duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5
#define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING (1ULL<<58) /* New, v7 encoding */
/*
* The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
@@ -108,7 +120,9 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_MSGR_KEEPALIVE2 | \
CEPH_FEATURE_CRUSH_V4)
CEPH_FEATURE_CRUSH_V4 | \
CEPH_FEATURE_CRUSH_TUNABLES5 | \
CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \


@@ -127,6 +127,12 @@ struct cgroup_subsys_state {
*/
u64 serial_nr;
/*
* Incremented by online self and children. Used to guarantee that
* parents are not offlined before their children.
*/
atomic_t online_cnt;
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;


@@ -37,7 +37,7 @@ struct cleancache_ops {
void (*invalidate_fs)(int);
};
extern int cleancache_register_ops(struct cleancache_ops *ops);
extern int cleancache_register_ops(const struct cleancache_ops *ops);
extern void __cleancache_init_fs(struct super_block *);
extern void __cleancache_init_shared_fs(struct super_block *);
extern int __cleancache_get_page(struct page *);
@@ -48,14 +48,14 @@ extern void __cleancache_invalidate_fs(struct super_block *);
#ifdef CONFIG_CLEANCACHE
#define cleancache_enabled (1)
static inline bool cleancache_fs_enabled(struct page *page)
{
return page->mapping->host->i_sb->cleancache_poolid >= 0;
}
static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
{
return mapping->host->i_sb->cleancache_poolid >= 0;
}
static inline bool cleancache_fs_enabled(struct page *page)
{
return cleancache_fs_enabled_mapping(page->mapping);
}
#else
#define cleancache_enabled (0)
#define cleancache_fs_enabled(_page) (0)
@@ -89,11 +89,9 @@ static inline void cleancache_init_shared_fs(struct super_block *sb)
static inline int cleancache_get_page(struct page *page)
{
int ret = -1;
if (cleancache_enabled && cleancache_fs_enabled(page))
ret = __cleancache_get_page(page);
return ret;
return __cleancache_get_page(page);
return -1;
}
static inline void cleancache_put_page(struct page *page)


@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
extern void cpuset_post_attach_flush(void);
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
static inline void cpuset_post_attach_flush(void)
{
}
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */


@@ -59,7 +59,8 @@ enum {
CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12
CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12,
CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13
};
/*
@@ -205,6 +206,11 @@ struct crush_map {
* mappings line up a bit better with previous mappings. */
__u8 chooseleaf_vary_r;
/* if true, it makes chooseleaf firstn to return stable results (if
* no local retry) so that data migrations would be optimal when some
* device fails. */
__u8 chooseleaf_stable;
#ifndef __KERNEL__
/*
* version 0 (original) of straw_calc has various flaws. version 1


@@ -14,6 +14,17 @@ int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
dax_iodone_t);
int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
dax_iodone_t);
#ifdef CONFIG_FS_DAX
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
#else
static inline struct page *read_dax_sector(struct block_device *bdev,
sector_t n)
{
return ERR_PTR(-ENXIO);
}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
unsigned int flags, get_block_t, dax_iodone_t);


@@ -19,6 +19,8 @@
int devpts_new_index(struct inode *ptmx_inode);
void devpts_kill_index(struct inode *ptmx_inode, int idx);
void devpts_add_ref(struct inode *ptmx_inode);
void devpts_del_ref(struct inode *ptmx_inode);
/* mknod in devpts */
struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
void *priv);
@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
/* Dummy stubs in the no-pty case */
static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
static inline void devpts_add_ref(struct inode *ptmx_inode) { }
static inline void devpts_del_ref(struct inode *ptmx_inode) { }
static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
dev_t device, int index, void *priv)
{


@@ -1199,7 +1199,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
struct list_head *head, bool remove);
bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
unsigned long data_size);
bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
size_t len);
extern struct work_struct efivar_work;
void efivar_run_worker(void);


@@ -484,9 +484,6 @@ struct block_device {
int bd_fsfreeze_count;
/* Mutex for freeze */
struct mutex bd_fsfreeze_mutex;
#ifdef CONFIG_FS_DAX
int bd_map_count;
#endif
};
/*
@@ -2907,7 +2904,7 @@ extern void replace_mount_options(struct super_block *sb, char *options);
static inline bool io_is_direct(struct file *filp)
{
return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
}
static inline int iocb_flags(struct file *file)


@@ -165,7 +165,6 @@ struct ftrace_ops {
ftrace_func_t saved_func;
int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
int nr_trampolines;
struct ftrace_ops_hash local_hash;
struct ftrace_ops_hash *func_hash;
struct ftrace_ops_hash old_hash;


@@ -547,16 +547,16 @@ static inline bool pm_suspended_storage(void)
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_CMA
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
#endif
#ifdef CONFIG_CMA
/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);
#endif
#endif /* __LINUX_GFP_H */


@@ -87,7 +87,8 @@ enum hrtimer_restart {
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
* @start_pid: timer statistics field to store the pid of the task which
* @is_rel: Set if the timer was armed relative
* @start_pid: timer statistics field to store the pid of the task which
* started the timer
* @start_site: timer statistics field to store the site where the timer
* was started
@@ -101,7 +102,8 @@ struct hrtimer {
ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
u8 state;
u8 is_rel;
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
@@ -321,6 +323,27 @@ static inline void clock_was_set_delayed(void) { }
#endif
static inline ktime_t
__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
{
ktime_t rem = ktime_sub(timer->node.expires, now);
/*
* Adjust relative timers for the extra we added in
* hrtimer_start_range_ns() to prevent short timeouts.
*/
if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
rem.tv64 -= hrtimer_resolution;
return rem;
}
static inline ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
return __hrtimer_expires_remaining_adjusted(timer,
timer->base->get_time());
}
extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
@@ -390,7 +413,12 @@ static inline void hrtimer_restart(struct hrtimer *timer)
}
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
return __hrtimer_get_remaining(timer, false);
}
extern u64 hrtimer_get_next_event(void);
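
hrtimer_get_remaining() is now a wrapper around __hrtimer_get_remaining(timer, false); passing true subtracts the extra slack that hrtimer_start_range_ns() adds to relative timers under CONFIG_TIME_LOW_RES. A hedged sketch contrasting the two paths, with a hypothetical caller:

/* Hypothetical caller: how much time is left on a timer? */
static ktime_t example_time_left(struct hrtimer *timer, bool user_visible)
{
	/*
	 * Values reported to userspace should not include the slack added
	 * for low-resolution clocks, so ask for the adjusted remaining
	 * time; internal bookkeeping keeps the raw expiry delta.
	 */
	return user_visible ? __hrtimer_get_remaining(timer, true)
			    : hrtimer_get_remaining(timer);
}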


@@ -133,8 +133,9 @@ struct iommu_dm_region {
/**
* struct iommu_ops - iommu ops and capabilities
* @domain_init: init iommu domain
* @domain_destroy: destroy iommu domain
* @capable: check capability
* @domain_alloc: allocate iommu domain
* @domain_free: free iommu domain
* @attach_dev: attach device to an iommu domain
* @detach_dev: detach device from an iommu domain
* @map: map a physically contiguous memory region to an iommu domain
@@ -144,8 +145,15 @@ struct iommu_dm_region {
* @iova_to_phys: translate iova to physical address
* @add_device: add device to iommu grouping
* @remove_device: remove device from iommu grouping
* @device_group: find iommu group for a particular device
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
* @get_dm_regions: Request list of direct mapping requirements for a device
* @put_dm_regions: Free list of direct mapping requirements for a device
* @domain_window_enable: Configure and enable a particular window for a domain
* @domain_window_disable: Disable a particular window for a domain
* @domain_set_windows: Set the number of windows for a domain
* @domain_get_windows: Return the number of windows for a domain
* @of_xlate: add OF master IDs to iommu grouping
* @pgsize_bitmap: bitmap of supported page sizes
* @priv: per-instance data private to the iommu driver
@@ -182,9 +190,9 @@ struct iommu_ops {
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot);
void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
/* Set the numer of window per domain */
/* Set the number of windows per domain */
int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
/* Get the numer of window per domain */
/* Get the number of windows per domain */
u32 (*domain_get_windows)(struct iommu_domain *domain);
#ifdef CONFIG_OF_IOMMU


@@ -70,6 +70,7 @@ struct irq_fwspec {
*/
enum irq_domain_bus_token {
DOMAIN_BUS_ANY = 0,
DOMAIN_BUS_WIRED,
DOMAIN_BUS_PCI_MSI,
DOMAIN_BUS_PLATFORM_MSI,
DOMAIN_BUS_NEXUS,


@@ -526,6 +526,7 @@ enum ata_lpm_policy {
enum ata_lpm_hints {
ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */
ATA_LPM_HIPM = (1 << 1), /* may use HIPM */
ATA_LPM_WAKE_ONLY = (1 << 2), /* only wake up link */
};
/* forward declarations */


@@ -66,7 +66,7 @@ struct lock_class {
/*
* class-hash:
*/
struct list_head hash_entry;
struct hlist_node hash_entry;
/*
* global list of all lock-classes:
@@ -199,7 +199,7 @@ struct lock_chain {
u8 irq_context;
u8 depth;
u16 base;
struct list_head entry;
struct hlist_node entry;
u64 chain_key;
};


@@ -51,7 +51,7 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
/* default hierarchy stats */
MEMCG_SOCK,
MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
MEMCG_NR_STAT,
};


@@ -201,11 +201,13 @@ extern unsigned int kobjsize(const void *objp);
#endif
#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#define VM_STACK VM_GROWSUP
#else
#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#define VM_STACK VM_GROWSDOWN
#endif
#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
/*
* Special vmas that are non-mergable, non-mlock()able.
* Note: mm/huge_memory.c VM_NO_THP depends on this definition.
@@ -1341,8 +1343,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
!vma_growsup(vma->vm_next, addr);
}
extern struct task_struct *task_of_stack(struct task_struct *task,
struct vm_area_struct *vma, bool in_group);
int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,


@@ -424,9 +424,9 @@ struct mm_struct {
unsigned long total_vm; /* Total pages mapped */
unsigned long locked_vm; /* Pages that have PG_mlocked set */
unsigned long pinned_vm; /* Refcount permanently increased */
unsigned long data_vm; /* VM_WRITE & ~VM_SHARED/GROWSDOWN */
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */
unsigned long stack_vm; /* VM_GROWSUP/DOWN */
unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
unsigned long def_flags;
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;


@@ -682,6 +682,12 @@ typedef struct pglist_data {
*/
unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
spinlock_t split_queue_lock;
struct list_head split_queue;
unsigned long split_queue_len;
#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)


@@ -324,6 +324,12 @@ struct module_layout {
#define __module_layout_align
#endif
struct mod_kallsyms {
Elf_Sym *symtab;
unsigned int num_symtab;
char *strtab;
};
struct module {
enum module_state state;
@@ -405,15 +411,10 @@ struct module {
#endif
#ifdef CONFIG_KALLSYMS
/*
* We keep the symbol and string tables for kallsyms.
* The core_* fields below are temporary, loader-only (they
* could really be discarded after module init).
*/
Elf_Sym *symtab, *core_symtab;
unsigned int num_symtab, core_num_syms;
char *strtab, *core_strtab;
/* Protected by RCU and/or module_mutex: use rcu_dereference() */
struct mod_kallsyms *kallsyms;
struct mod_kallsyms core_kallsyms;
/* Section attributes */
struct module_sect_attrs *sect_attrs;
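
With the kallsyms tables moved behind an RCU-protected mod_kallsyms pointer, readers must dereference it as the comment above prescribes. A minimal sketch of that access pattern; the helper itself is illustrative, not taken from this commit:

static unsigned int example_num_symtab(struct module *mod)
{
	struct mod_kallsyms *kallsyms;
	unsigned int num;

	/* Pin the tables while we look at them, then dereference under RCU. */
	preempt_disable();
	kallsyms = rcu_dereference_sched(mod->kallsyms);
	num = kallsyms->num_symtab;
	preempt_enable();

	return num;
}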


@@ -512,7 +512,6 @@ static inline void napi_enable(struct napi_struct *n)
clear_bit(NAPI_STATE_NPSVC, &n->state);
}
#ifdef CONFIG_SMP
/**
* napi_synchronize - wait until NAPI is not running
* @n: napi context
@@ -523,12 +522,12 @@ static inline void napi_enable(struct napi_struct *n)
*/
static inline void napi_synchronize(const struct napi_struct *n)
{
while (test_bit(NAPI_STATE_SCHED, &n->state))
msleep(1);
if (IS_ENABLED(CONFIG_SMP))
while (test_bit(NAPI_STATE_SCHED, &n->state))
msleep(1);
else
barrier();
}
#else
# define napi_synchronize(n) barrier()
#endif
enum netdev_queue_state_t {
__QUEUE_STATE_DRV_XOFF,


@@ -929,7 +929,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
return num;
}
#ifdef CONFIG_OF
#if defined(CONFIG_OF) && !defined(MODULE)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
__used __section(__##table##_of_table) \


@@ -634,9 +634,6 @@ struct perf_event_context {
int nr_cgroups; /* cgroup evts */
void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
struct delayed_work orphans_remove;
bool orphans_remove_sched;
};
/*
@@ -729,7 +726,7 @@ extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct perf_event *perf_event_get(unsigned int fd);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
@@ -1044,7 +1041,7 @@ extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
@@ -1070,7 +1067,7 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
return ERR_PTR(-EINVAL);


@@ -10,7 +10,7 @@
* backing is indicated by flags in the high bits of the value.
*/
typedef struct {
unsigned long val;
u64 val;
} pfn_t;
#endif


@@ -9,14 +9,13 @@
* PFN_DEV - pfn is not covered by system memmap by default
* PFN_MAP - pfn has a dynamic page mapping established by a device driver
*/
#define PFN_FLAGS_MASK (((unsigned long) ~PAGE_MASK) \
<< (BITS_PER_LONG - PAGE_SHIFT))
#define PFN_SG_CHAIN (1UL << (BITS_PER_LONG - 1))
#define PFN_SG_LAST (1UL << (BITS_PER_LONG - 2))
#define PFN_DEV (1UL << (BITS_PER_LONG - 3))
#define PFN_MAP (1UL << (BITS_PER_LONG - 4))
#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))
static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, unsigned long flags)
static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
{
pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
@@ -29,7 +28,7 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
return __pfn_to_pfn_t(pfn, 0);
}
extern pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags);
extern pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags);
static inline bool pfn_t_has_page(pfn_t pfn)
{
@@ -48,7 +47,7 @@ static inline struct page *pfn_t_to_page(pfn_t pfn)
return NULL;
}
static inline dma_addr_t pfn_t_to_phys(pfn_t pfn)
static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
{
return PFN_PHYS(pfn_t_to_pfn(pfn));
}
@@ -87,7 +86,7 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline bool pfn_t_devmap(pfn_t pfn)
{
const unsigned long flags = PFN_DEV|PFN_MAP;
const u64 flags = PFN_DEV|PFN_MAP;
return (pfn.val & flags) == flags;
}


@@ -378,13 +378,29 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
void **radix_tree_next_chunk(struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned flags);
/**
* radix_tree_iter_retry - retry this chunk of the iteration
* @iter: iterator state
*
* If we iterate over a tree protected only by the RCU lock, a race
* against deletion or creation may result in seeing a slot for which
* radix_tree_deref_retry() returns true. If so, call this function
* and continue the iteration.
*/
static inline __must_check
void **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
iter->next_index = iter->index;
return NULL;
}
/**
* radix_tree_chunk_size - get current chunk size
*
* @iter: pointer to radix tree iterator
* Returns: current chunk size
*/
static __always_inline unsigned
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
return iter->next_index - iter->index;
@@ -418,9 +434,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
return slot + offset + 1;
}
} else {
unsigned size = radix_tree_chunk_size(iter) - 1;
long size = radix_tree_chunk_size(iter);
while (size--) {
while (--size > 0) {
slot++;
iter->index++;
if (likely(*slot))
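
radix_tree_iter_retry() above is intended for lookups protected only by RCU: when radix_tree_deref_retry() reports a transient slot, resetting the iterator and continuing restarts the current chunk. A hedged sketch of that idiom; the surrounding helper and the walk over a mapping's page_tree are illustrative, not from this commit:

static void example_walk_rcu(struct address_space *mapping, unsigned long start)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page = radix_tree_deref_slot(slot);

		if (unlikely(!page))
			continue;
		if (radix_tree_deref_retry(page)) {
			/* Raced with an insertion/deletion: redo this chunk. */
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		/* ... inspect page ... */
	}
	rcu_read_unlock();
}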


@@ -152,6 +152,8 @@ void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
# define jiffies raid6_jiffies()
# define printk printf
# define pr_err(format, ...) fprintf(stderr, format, ## __VA_ARGS__)
# define pr_info(format, ...) fprintf(stdout, format, ## __VA_ARGS__)
# define GFP_KERNEL 0
# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
PROT_READ|PROT_WRITE, \


@@ -109,20 +109,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
__put_anon_vma(anon_vma);
}
static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
if (anon_vma)
down_write(&anon_vma->root->rwsem);
}
static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
if (anon_vma)
up_write(&anon_vma->root->rwsem);
}
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
down_write(&anon_vma->root->rwsem);


@@ -299,6 +299,7 @@ struct sk_buff;
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;
typedef struct skb_frag_struct skb_frag_t;


@@ -1,10 +1,13 @@
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
struct device;
struct dma_attrs;
struct page;
struct scatterlist;
extern int swiotlb_force;


@@ -649,6 +649,7 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
/* tty_mutex.c */
/* functions for preparation of BKL removal */
extern void __lockfunc tty_lock(struct tty_struct *tty);
extern int tty_lock_interruptible(struct tty_struct *tty);
extern void __lockfunc tty_unlock(struct tty_struct *tty);
extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);


@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
unsigned long ucs2_utf8size(const ucs2_char_t *src);
unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
unsigned long maxlength);
#endif /* _LINUX_UCS2_STRING_H_ */
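
The new ucs2_utf8size()/ucs2_as_utf8() helpers convert UCS-2 strings (the encoding EFI variable names use) to UTF-8. A minimal sketch of the intended call pattern; the duplicating helper itself is hypothetical:

static char *example_ucs2_to_utf8(const ucs2_char_t *name)
{
	unsigned long len = ucs2_utf8size(name);
	char *buf = kzalloc(len + 1, GFP_KERNEL);	/* +1 for the trailing NUL */

	if (!buf)
		return NULL;
	ucs2_as_utf8((u8 *)buf, name, len);
	return buf;
}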


@@ -311,6 +311,7 @@ enum {
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
__WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -411,12 +412,12 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
#define create_workqueue(name) \
alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name) \
alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
1, (name))
alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name) \
alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
extern void destroy_workqueue(struct workqueue_struct *wq);