Merge tag 'v4.4-rc6' into devel
Linux 4.4-rc6
@@ -870,8 +870,8 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
}
static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
const char *name, const char *cells_name,
size_t index, struct acpi_reference_args *args)
const char *name, size_t index,
struct acpi_reference_args *args)
{
return -ENXIO;
}
@@ -107,7 +107,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
*/
static inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> (32 - shift));
return (word << shift) | (word >> ((-shift) & 31));
}
/**
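Note on the rol32() change above: with shift == 0 the old form computes word >> 32, which is undefined behaviour for a 32-bit operand, while the masked form reduces the shift count modulo 32. A minimal user-space sketch (not part of this diff) of the same idiom:

#include <stdint.h>
#include <stdio.h>

/* Rotate left using a masked shift count, mirroring the new rol32() form. */
static uint32_t rol32_masked(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}

int main(void)
{
	/* shift == 0 is the case the old "32 - shift" form mishandled. */
	printf("%08x\n", rol32_masked(0xdeadbeef, 0)); /* deadbeef */
	printf("%08x\n", rol32_masked(0xdeadbeef, 8)); /* adbeefde */
	return 0;
}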
@@ -254,6 +254,7 @@ struct queue_limits {
unsigned long virt_boundary_mask;
unsigned int max_hw_sectors;
unsigned int max_dev_sectors;
unsigned int chunk_sectors;
unsigned int max_sectors;
unsigned int max_segment_size;
@@ -773,7 +774,6 @@ extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
@@ -794,6 +794,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
@@ -958,7 +960,6 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
@@ -40,6 +40,7 @@ struct bpf_map {
struct user_struct *user;
const struct bpf_map_ops *ops;
struct work_struct work;
atomic_t usercnt;
};
struct bpf_map_type_list {
@@ -167,8 +168,10 @@ struct bpf_prog *bpf_prog_get(u32 ufd);
void bpf_prog_put(struct bpf_prog *prog);
void bpf_prog_put_rcu(struct bpf_prog *prog);
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
extern int sysctl_unprivileged_bpf_disabled;
@@ -90,7 +90,6 @@ enum {
*/
struct cgroup_file {
/* do not access any fields from outside cgroup core */
struct list_head node; /* anchored at css->files */
struct kernfs_node *kn;
};
@@ -134,9 +133,6 @@ struct cgroup_subsys_state {
*/
u64 serial_nr;
/* all cgroup_files associated with this css */
struct list_head files;
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
@@ -426,12 +422,9 @@ struct cgroup_subsys {
void (*css_reset)(struct cgroup_subsys_state *css);
void (*css_e_css_changed)(struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
void (*attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
int (*can_fork)(struct task_struct *task, void **priv_p);
void (*cancel_fork)(struct task_struct *task, void *priv);
void (*fork)(struct task_struct *task, void *priv);
@@ -88,6 +88,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
@@ -119,8 +120,10 @@ struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *css);
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp);
void css_task_iter_start(struct cgroup_subsys_state *css,
struct css_task_iter *it);
@@ -235,30 +238,39 @@ void css_task_iter_end(struct css_task_iter *it);
/**
* cgroup_taskset_for_each - iterate cgroup_taskset
* @task: the loop cursor
* @dst_css: the destination css
* @tset: taskset to iterate
*
* @tset may contain multiple tasks and they may belong to multiple
* processes. When there are multiple tasks in @tset, if a task of a
* process is in @tset, all tasks of the process are in @tset. Also, all
* are guaranteed to share the same source and destination csses.
* processes.
*
* On the v2 hierarchy, there may be tasks from multiple processes and they
* may not share the source or destination csses.
*
* On traditional hierarchies, when there are multiple tasks in @tset, if a
* task of a process is in @tset, all tasks of the process are in @tset.
* Also, all are guaranteed to share the same source and destination csses.
*
* Iteration is not in any specific order.
*/
#define cgroup_taskset_for_each(task, tset) \
for ((task) = cgroup_taskset_first((tset)); (task); \
(task) = cgroup_taskset_next((tset)))
#define cgroup_taskset_for_each(task, dst_css, tset) \
for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
(task); \
(task) = cgroup_taskset_next((tset), &(dst_css)))
/**
* cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
* @leader: the loop cursor
* @dst_css: the destination css
* @tset: takset to iterate
*
* Iterate threadgroup leaders of @tset. For single-task migrations, @tset
* may not contain any.
*/
#define cgroup_taskset_for_each_leader(leader, tset) \
for ((leader) = cgroup_taskset_first((tset)); (leader); \
(leader) = cgroup_taskset_next((tset))) \
#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \
for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
(leader); \
(leader) = cgroup_taskset_next((tset), &(dst_css))) \
if ((leader) != (leader)->group_leader) \
; \
else
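The taskset changes above drop the per-css callback signatures in favour of a single tset argument plus a destination-css cursor in the iterator. A rough kernel-context sketch (controller name and body are hypothetical, not from this commit) of how a controller's ->attach() might use the new form:

/* Hypothetical controller callback using the new tset-only signature. */
static void example_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;

	cgroup_taskset_for_each(task, dst_css, tset) {
		/* dst_css is the css the task is migrating into; per-task
		 * attach work for this controller would go here. */
	}
}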
@@ -516,19 +528,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
/**
* cgroup_file_notify - generate a file modified event for a cgroup_file
* @cfile: target cgroup_file
*
* @cfile must have been obtained by setting cftype->file_offset.
*/
static inline void cgroup_file_notify(struct cgroup_file *cfile)
{
/* might not have been created due to one of the CFTYPE selector flags */
if (cfile->kn)
kernfs_notify(cfile->kn);
}
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
@@ -197,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro
int configfs_register_subsystem(struct configfs_subsystem *subsys);
void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
int configfs_register_group(struct config_group *parent_group,
struct config_group *group);
void configfs_unregister_group(struct config_group *group);
struct config_group *
configfs_register_default_group(struct config_group *parent_group,
const char *name,
struct config_item_type *item_type);
void configfs_unregister_default_group(struct config_group *group);
/* These functions can sleep and can alloc with GFP_KERNEL */
/* WARNING: These cannot be called underneath configfs callbacks!! */
int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
@@ -77,6 +77,7 @@ struct cpufreq_policy {
unsigned int suspend_freq; /* freq to set during suspend */
unsigned int policy; /* see above */
unsigned int last_policy; /* policy before unplug */
struct cpufreq_governor *governor; /* see below */
void *governor_data;
bool governor_enabled; /* governor start/stop flag */
@@ -27,7 +27,7 @@
#ifdef __KERNEL__
extern int dns_query(const char *type, const char *name, size_t namelen,
const char *options, char **_result, time_t *_expiry);
const char *options, char **_result, time64_t *_expiry);
#endif /* KERNEL */
@@ -29,7 +29,11 @@
/* A few generic types ... taken from ses-2 */
enum enclosure_component_type {
ENCLOSURE_COMPONENT_DEVICE = 0x01,
ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07,
ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14,
ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15,
ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18,
};
/* ses-2 common element status */
@@ -271,7 +271,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
return gfp_flags & __GFP_DIRECT_RECLAIM;
return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
}
#ifdef CONFIG_HIGHMEM
@@ -227,7 +227,7 @@ struct ipv6_pinfo {
struct ipv6_ac_socklist *ipv6_ac_list;
struct ipv6_fl_socklist __rcu *ipv6_fl_list;
struct ipv6_txoptions *opt;
struct ipv6_txoptions __rcu *opt;
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
struct inet6_cork cork;
@@ -330,6 +330,7 @@ struct rdists {
};
struct irq_domain;
struct device_node;
int its_cpu_init(void);
int its_init(struct device_node *node, struct rdists *rdists,
struct irq_domain *domain);
@@ -5,7 +5,7 @@
* Jump label support
*
* Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
* Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
*
* DEPRECATED API:
*
@@ -25,7 +25,7 @@
#ifdef CONFIG_DEBUG_KMEMLEAK
extern void kmemleak_init(void) __ref;
extern void kmemleak_init(void) __init;
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
gfp_t gfp) __ref;
extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
@@ -19,7 +19,6 @@
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
struct kref {
atomic_t refcount;
@@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
return kref_sub(kref, 1, release);
}
/**
* kref_put_spinlock_irqsave - decrement refcount for object.
* @kref: object.
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
* in as this function.
* @lock: lock to take in release case
*
* Behaves identical to kref_put with one exception. If the reference count
* drops to zero, the lock will be taken atomically wrt dropping the reference
* count. The release function has to call spin_unlock() without _irqrestore.
*/
static inline int kref_put_spinlock_irqsave(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
{
unsigned long flags;
WARN_ON(release == NULL);
if (atomic_add_unless(&kref->refcount, -1, 1))
return 0;
spin_lock_irqsave(lock, flags);
if (atomic_dec_and_test(&kref->refcount)) {
release(kref);
local_irq_restore(flags);
return 1;
}
spin_unlock_irqrestore(lock, flags);
return 0;
}
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
@@ -460,6 +460,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
(vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
idx++)
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
struct kvm_vcpu *vcpu;
int i;
kvm_for_each_vcpu(i, vcpu, kvm)
if (vcpu->vcpu_id == id)
return vcpu;
return NULL;
}
#define kvm_for_each_memslot(memslot, slots) \
for (memslot = &slots->memslots[0]; \
memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
@@ -210,6 +210,7 @@ enum {
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
/* (doesn't imply presence) */
ATA_FLAG_SATA = (1 << 1),
ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
@@ -50,15 +50,21 @@ enum {
NVM_IO_DUAL_ACCESS = 0x1,
NVM_IO_QUAD_ACCESS = 0x2,
/* NAND Access Modes */
NVM_IO_SUSPEND = 0x80,
NVM_IO_SLC_MODE = 0x100,
NVM_IO_SCRAMBLE_DISABLE = 0x200,
/* Block Types */
NVM_BLK_T_FREE = 0x0,
NVM_BLK_T_BAD = 0x1,
NVM_BLK_T_DEV = 0x2,
NVM_BLK_T_HOST = 0x4,
};
struct nvm_id_group {
u8 mtype;
u8 fmtype;
u16 res16;
u8 num_ch;
u8 num_lun;
u8 num_pln;
@@ -74,9 +80,9 @@ struct nvm_id_group {
u32 tbet;
u32 tbem;
u32 mpos;
u32 mccap;
u16 cpar;
u8 res[913];
} __packed;
};
struct nvm_addr_format {
u8 ch_offset;
@@ -91,19 +97,15 @@ struct nvm_addr_format {
u8 pg_len;
u8 sect_offset;
u8 sect_len;
u8 res[4];
};
struct nvm_id {
u8 ver_id;
u8 vmnt;
u8 cgrps;
u8 res[5];
u32 cap;
u32 dom;
struct nvm_addr_format ppaf;
u8 ppat;
u8 resv[224];
struct nvm_id_group groups[4];
} __packed;
@@ -123,39 +125,28 @@ struct nvm_tgt_instance {
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (6)
#define NVM_PG_BITS (16)
#define NVM_BLK_BITS (16)
#define NVM_LUN_BITS (10)
#define NVM_PG_BITS (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (8)
struct ppa_addr {
/* Generic structure for all addresses */
union {
/* Channel-based PPA format in nand 4x2x2x2x8x10 */
struct {
u64 ch : 4;
u64 sec : 2; /* 4 sectors per page */
u64 pl : 2; /* 4 planes per LUN */
u64 lun : 2; /* 4 LUNs per channel */
u64 pg : 8; /* 256 pages per block */
u64 blk : 10;/* 1024 blocks per plane */
u64 resved : 36;
} chnl;
/* Generic structure for all addresses */
struct {
u64 blk : NVM_BLK_BITS;
u64 pg : NVM_PG_BITS;
u64 sec : NVM_SEC_BITS;
u64 pl : NVM_PL_BITS;
u64 pg : NVM_PG_BITS;
u64 blk : NVM_BLK_BITS;
u64 lun : NVM_LUN_BITS;
u64 ch : NVM_CH_BITS;
} g;
u64 ppa;
};
} __packed;
};
struct nvm_rq {
struct nvm_tgt_instance *ins;
@@ -191,18 +182,18 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
struct nvm_block;
typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *);
typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int,
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
nvm_bb_update_fn *, void *);
typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t,
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
@@ -210,7 +201,7 @@ struct nvm_dev_ops {
nvm_id_fn *identity;
nvm_get_l2p_tbl_fn *get_l2p_tbl;
nvm_op_bb_tbl_fn *get_bb_tbl;
nvm_op_set_bb_fn *set_bb;
nvm_op_set_bb_fn *set_bb_tbl;
nvm_submit_io_fn *submit_io;
nvm_erase_blk_fn *erase_block;
@@ -220,7 +211,7 @@ struct nvm_dev_ops {
nvm_dev_dma_alloc_fn *dev_dma_alloc;
nvm_dev_dma_free_fn *dev_dma_free;
uint8_t max_phys_sect;
unsigned int max_phys_sect;
};
struct nvm_lun {
@@ -229,7 +220,9 @@ struct nvm_lun {
int lun_id;
int chnl_id;
unsigned int nr_inuse_blocks; /* Number of used blocks */
unsigned int nr_free_blocks; /* Number of unused blocks */
unsigned int nr_bad_blocks; /* Number of bad blocks */
struct nvm_block *blocks;
spinlock_t lock;
@@ -263,8 +256,7 @@ struct nvm_dev {
int blks_per_lun;
int sec_size;
int oob_size;
int addr_mode;
struct nvm_addr_format addr_format;
struct nvm_addr_format ppaf;
/* Calculated/Cached values. These do not reflect the actual usable
* blocks at run-time.
@@ -290,118 +282,45 @@ struct nvm_dev {
char name[DISK_NAME_LEN];
};
/* fallback conversion */
static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev,
struct ppa_addr r)
static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
struct ppa_addr r)
{
struct ppa_addr l;
l.ppa = r.g.sec +
r.g.pg * dev->sec_per_pg +
r.g.blk * (dev->pgs_per_blk *
dev->sec_per_pg) +
r.g.lun * (dev->blks_per_lun *
dev->pgs_per_blk *
dev->sec_per_pg) +
r.g.ch * (dev->blks_per_lun *
dev->pgs_per_blk *
dev->luns_per_chnl *
dev->sec_per_pg);
l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;
return l;
}
/* fallback conversion */
static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev,
struct ppa_addr r)
{
struct ppa_addr l;
int secs, pgs, blks, luns;
sector_t ppa = r.ppa;
l.ppa = 0;
div_u64_rem(ppa, dev->sec_per_pg, &secs);
l.g.sec = secs;
sector_div(ppa, dev->sec_per_pg);
div_u64_rem(ppa, dev->sec_per_blk, &pgs);
l.g.pg = pgs;
sector_div(ppa, dev->pgs_per_blk);
div_u64_rem(ppa, dev->blks_per_lun, &blks);
l.g.blk = blks;
sector_div(ppa, dev->blks_per_lun);
div_u64_rem(ppa, dev->luns_per_chnl, &luns);
l.g.lun = luns;
sector_div(ppa, dev->luns_per_chnl);
l.g.ch = ppa;
return l;
}
static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r)
static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
struct ppa_addr r)
{
struct ppa_addr l;
l.ppa = 0;
l.chnl.sec = r.g.sec;
l.chnl.pl = r.g.pl;
l.chnl.pg = r.g.pg;
l.chnl.blk = r.g.blk;
l.chnl.lun = r.g.lun;
l.chnl.ch = r.g.ch;
/*
* (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
*/
l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
(((1 << dev->ppaf.blk_len) - 1));
l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) &
(((1 << dev->ppaf.pg_len) - 1));
l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) &
(((1 << dev->ppaf.sect_len) - 1));
l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) &
(((1 << dev->ppaf.pln_len) - 1));
l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) &
(((1 << dev->ppaf.lun_len) - 1));
l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) &
(((1 << dev->ppaf.ch_len) - 1));
return l;
}
static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
{
struct ppa_addr l;
l.ppa = 0;
l.g.sec = r.chnl.sec;
l.g.pl = r.chnl.pl;
l.g.pg = r.chnl.pg;
l.g.blk = r.chnl.blk;
l.g.lun = r.chnl.lun;
l.g.ch = r.chnl.ch;
return l;
}
static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
struct ppa_addr gppa)
{
switch (dev->addr_mode) {
case NVM_ADDRMODE_LINEAR:
return __linear_to_generic_addr(dev, gppa);
case NVM_ADDRMODE_CHANNEL:
return __chnl_to_generic_addr(gppa);
default:
BUG();
}
return gppa;
}
static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
struct ppa_addr gppa)
{
switch (dev->addr_mode) {
case NVM_ADDRMODE_LINEAR:
return __generic_to_linear_addr(dev, gppa);
case NVM_ADDRMODE_CHANNEL:
return __generic_to_chnl_addr(gppa);
default:
BUG();
}
return gppa;
}
static inline int ppa_empty(struct ppa_addr ppa_addr)
{
return (ppa_addr.ppa == ADDR_EMPTY);
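The new generic_to_dev_addr()/dev_to_generic_addr() helpers above pack and unpack each address field with the offset/length pairs reported in dev->ppaf, replacing the arithmetic fallback conversions. A stand-alone sketch of that shift-and-mask scheme (user-space C with made-up offsets and lengths, not kernel code):

#include <assert.h>
#include <stdint.h>

struct example_ppaf { unsigned sect_offset, sect_len, blk_offset, blk_len; };

int main(void)
{
	/* Example geometry only; a real device reports these in its ID data. */
	struct example_ppaf ppaf = { .sect_offset = 0, .sect_len = 2,
				     .blk_offset = 12, .blk_len = 10 };
	uint64_t sec = 3, blk = 57, ppa;

	/* pack: generic -> device format */
	ppa = (sec << ppaf.sect_offset) | (blk << ppaf.blk_offset);

	/* unpack: device -> generic format */
	assert(((ppa >> ppaf.sect_offset) & ((1u << ppaf.sect_len) - 1)) == sec);
	assert(((ppa >> ppaf.blk_offset) & ((1u << ppaf.blk_len) - 1)) == blk);
	return 0;
}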
@@ -468,7 +387,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *);
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
struct nvmm_type {
const char *name;
@@ -492,7 +411,7 @@ struct nvmm_type {
nvmm_get_lun_fn *get_lun;
/* Statistics */
nvmm_free_blocks_print_fn *free_blocks_print;
nvmm_lun_info_print_fn *lun_info_print;
struct list_head list;
};
@@ -2,7 +2,7 @@
* Runtime locking correctness validator
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* see Documentation/locking/lockdep-design.txt for more details.
*/
@@ -16,6 +16,7 @@
#define MARVELL_PHY_ID_88E1318S 0x01410e90
#define MARVELL_PHY_ID_88E1116R 0x01410e40
#define MARVELL_PHY_ID_88E1510 0x01410dd0
#define MARVELL_PHY_ID_88E1540 0x01410eb0
#define MARVELL_PHY_ID_88E3016 0x01410e60
/* struct phy_device dev_flags definitions */
@@ -426,6 +426,17 @@ enum {
MLX4_MAX_FAST_REG_PAGES = 511,
};
enum {
/*
* Max wqe size for rdma read is 512 bytes, so this
* limits our max_sge_rd as the wqe needs to fit:
* - ctrl segment (16 bytes)
* - rdma segment (16 bytes)
* - scatter elements (16 bytes each)
*/
MLX4_MAX_SGE_RD = (512 - 16 - 16) / 16
};
enum {
MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14,
MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15,
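Working the MLX4_MAX_SGE_RD comment through: a 512-byte RDMA-read WQE minus the 16-byte control segment and the 16-byte RDMA segment leaves 480 bytes, and at 16 bytes per scatter element that is (512 - 16 - 16) / 16 = 30 entries.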
@@ -453,26 +453,28 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 lro_cap[0x1];
u8 lro_psh_flag[0x1];
u8 lro_time_stamp[0x1];
u8 reserved_0[0x6];
u8 reserved_0[0x3];
u8 self_lb_en_modifiable[0x1];
u8 reserved_1[0x2];
u8 max_lso_cap[0x5];
u8 reserved_1[0x4];
u8 reserved_2[0x4];
u8 rss_ind_tbl_cap[0x4];
u8 reserved_2[0x3];
u8 reserved_3[0x3];
u8 tunnel_lso_const_out_ip_id[0x1];
u8 reserved_3[0x2];
u8 reserved_4[0x2];
u8 tunnel_statless_gre[0x1];
u8 tunnel_stateless_vxlan[0x1];
u8 reserved_4[0x20];
u8 reserved_5[0x20];
u8 reserved_5[0x10];
u8 reserved_6[0x10];
u8 lro_min_mss_size[0x10];
u8 reserved_6[0x120];
u8 reserved_7[0x120];
u8 lro_timer_supported_periods[4][0x20];
u8 reserved_7[0x600];
u8 reserved_8[0x600];
};
struct mlx5_ifc_roce_cap_bits {
@@ -4051,9 +4053,11 @@ struct mlx5_ifc_modify_tis_in_bits {
};
struct mlx5_ifc_modify_tir_bitmask_bits {
u8 reserved[0x20];
u8 reserved_0[0x20];
u8 reserved1[0x1f];
u8 reserved_1[0x1b];
u8 self_lb_en[0x1];
u8 reserved_2[0x3];
u8 lro[0x1];
};
@@ -1,6 +1,7 @@
#ifndef LINUX_MM_DEBUG_H
#define LINUX_MM_DEBUG_H 1
#include <linux/bug.h>
#include <linux/stringify.h>
struct page;
@@ -34,8 +34,12 @@ struct inode;
struct file;
struct net;
#define SOCK_ASYNC_NOSPACE 0
#define SOCK_ASYNC_WAITDATA 1
/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
* in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
* Eventually all flags will be in sk->sk_wq_flags.
*/
#define SOCKWQ_ASYNC_NOSPACE 0
#define SOCKWQ_ASYNC_WAITDATA 1
#define SOCK_NOSPACE 2
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
@@ -89,6 +93,7 @@ struct socket_wq {
/* Note: wait MUST be first field of socket_wq */
wait_queue_head_t wait;
struct fasync_struct *fasync_list;
unsigned long flags; /* %SOCKWQ_ASYNC_NOSPACE, etc */
struct rcu_head rcu;
} ____cacheline_aligned_in_smp;
@@ -96,7 +101,7 @@ struct socket_wq {
* struct socket - general BSD socket
* @state: socket state (%SS_CONNECTED, etc)
* @type: socket type (%SOCK_STREAM, etc)
* @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc)
* @flags: socket flags (%SOCK_NOSPACE, etc)
* @ops: protocol specific socket operations
* @file: File back pointer for gc
* @sk: internal networking protocol agnostic socket representation
@@ -202,7 +207,7 @@ enum {
SOCK_WAKE_URG,
};
int sock_wake_async(struct socket *sk, int how, int band);
int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
int sock_register(const struct net_proto_family *fam);
void sock_unregister(int family);
int __sock_create(struct net *net, int family, int type, int proto,
@@ -1398,7 +1398,8 @@ enum netdev_priv_flags {
* @dma: DMA channel
* @mtu: Interface MTU value
* @type: Interface hardware type
* @hard_header_len: Hardware header length
* @hard_header_len: Hardware header length, which means that this is the
* minimum size of a packet.
*
* @needed_headroom: Extra headroom the hardware may need, but not in all
* cases can this be guaranteed
@@ -2068,20 +2069,23 @@ struct pcpu_sw_netstats {
struct u64_stats_sync syncp;
};
#define netdev_alloc_pcpu_stats(type) \
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
if (pcpu_stats) { \
int __cpu; \
for_each_possible_cpu(__cpu) { \
typeof(type) *stat; \
stat = per_cpu_ptr(pcpu_stats, __cpu); \
u64_stats_init(&stat->syncp); \
} \
} \
pcpu_stats; \
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
if (pcpu_stats) { \
int __cpu; \
for_each_possible_cpu(__cpu) { \
typeof(type) *stat; \
stat = per_cpu_ptr(pcpu_stats, __cpu); \
u64_stats_init(&stat->syncp); \
} \
} \
pcpu_stats; \
})
#define netdev_alloc_pcpu_stats(type) \
__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
#include <linux/notifier.h>
/* netdevice notifier chain. Please remember to update the rtnetlink
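The per-cpu stats macros above now funnel through a gfp-aware helper. A brief usage sketch (typical driver pattern rather than code from this diff; variable names are illustrative):

struct pcpu_sw_netstats __percpu *stats;

/* Sleeping context: expands to __netdev_alloc_pcpu_stats(type, GFP_KERNEL). */
stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!stats)
	return -ENOMEM;

/* Callers in atomic context can now pass their own gfp mask instead:
 * stats = __netdev_alloc_pcpu_stats(struct pcpu_sw_netstats, GFP_ATOMIC);
 */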
@@ -3854,6 +3858,11 @@ static inline bool netif_is_bridge_master(const struct net_device *dev)
return dev->priv_flags & IFF_EBRIDGE;
}
static inline bool netif_is_bridge_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_BRIDGE_PORT;
}
static inline bool netif_is_ovs_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_OPENVSWITCH;
@@ -421,7 +421,7 @@ extern void ip_set_free(void *members);
extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
size_t len);
size_t len, size_t align);
extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext);
@@ -14,7 +14,7 @@ struct nfnl_callback {
int (*call_rcu)(struct sock *nl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[]);
int (*call_batch)(struct sock *nl, struct sk_buff *skb,
int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[]);
const struct nla_policy *policy; /* netlink attribute policy */
@@ -5,10 +5,13 @@
#include <linux/netdevice.h>
#ifdef CONFIG_NETFILTER_INGRESS
static inline int nf_hook_ingress_active(struct sk_buff *skb)
static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
{
return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
NFPROTO_NETDEV, NF_NETDEV_INGRESS);
#ifdef HAVE_JUMP_LABEL
if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
return false;
#endif
return !list_empty(&skb->dev->nf_hooks_ingress);
}
static inline int nf_hook_ingress(struct sk_buff *skb)
@@ -16,8 +19,8 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
struct nf_hook_state state;
nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
skb->dev, NULL, dev_net(skb->dev), NULL);
NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
return nf_hook_slow(skb, &state);
}
@@ -251,6 +251,7 @@ struct nfs4_layoutget {
struct nfs4_layoutget_res res;
struct rpc_cred *cred;
gfp_t gfp_flags;
long timeout;
};
struct nfs4_getdeviceinfo_args {
@@ -80,7 +80,7 @@ static inline int of_dma_router_register(struct device_node *np,
static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
const char *name)
{
return NULL;
return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
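With the of_dma_request_slave_channel() stub above now returning ERR_PTR(-ENODEV) instead of NULL, callers are expected to test the result with IS_ERR(). A hedged caller sketch (variable names are illustrative, not from this commit):

struct dma_chan *chan;

chan = of_dma_request_slave_channel(np, "rx");
if (IS_ERR(chan))
	return PTR_ERR(chan); /* -ENODEV when OF DMA support is not built in */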
@@ -46,12 +46,14 @@ extern int of_irq_get(struct device_node *dev, int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
extern struct device_node *of_irq_find_parent(struct device_node *child);
extern struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token);
extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
u32 rid);
extern void of_msi_configure(struct device *dev, struct device_node *np);
u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
#else
static inline int of_irq_count(struct device_node *dev)
{
@@ -70,6 +72,11 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
{
return 0;
}
static inline void *of_irq_find_parent(struct device_node *child)
{
return NULL;
}
static inline struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token)
@@ -84,6 +91,11 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev
static inline void of_msi_configure(struct device *dev, struct device_node *np)
{
}
static inline u32 of_msi_map_rid(struct device *dev,
struct device_node *msi_np, u32 rid_in)
{
return rid_in;
}
#endif
#if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC)
@@ -93,7 +105,6 @@ static inline void of_msi_configure(struct device *dev, struct device_node *np)
* so declare it here regardless of the CONFIG_OF_IRQ setting.
*/
extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
#else /* !CONFIG_OF && !CONFIG_SPARC */
static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
@@ -101,12 +112,6 @@ static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
{
return 0;
}
static inline u32 of_msi_map_rid(struct device *dev,
struct device_node *msi_np, u32 rid_in)
{
return rid_in;
}
#endif /* !CONFIG_OF */
#endif /* __OF_IRQ_H */
@@ -412,9 +412,18 @@ struct pci_host_bridge {
void (*release_fn)(struct pci_host_bridge *);
void *release_data;
unsigned int ignore_reset_delay:1; /* for entire hierarchy */
/* Resource alignment requirements */
resource_size_t (*align_resource)(struct pci_dev *dev,
const struct resource *res,
resource_size_t start,
resource_size_t size,
resource_size_t align);
};
#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
void (*release_fn)(struct pci_host_bridge *),
void *release_data);
@@ -697,9 +697,11 @@ struct perf_cgroup {
* if there is no cgroup event for the current CPU context.
*/
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
return container_of(task_css(task, perf_event_cgrp_id),
return container_of(task_css_check(task, perf_event_cgrp_id,
ctx ? lockdep_is_held(&ctx->lock)
: true),
struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */
@@ -72,7 +72,7 @@ struct edma_soc_info {
struct edma_rsv_info *rsv;
/* List of channels allocated for memcpy, terminated with -1 */
s16 *memcpy_channels;
s32 *memcpy_channels;
s8 (*queue_priority_mapping)[2];
const s16 (*xbar_chans)[2];
@@ -1,7 +1,7 @@
/*
* FLoating proportions
*
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* This file contains the public data structure and API definitions.
*/
@@ -9,6 +9,8 @@
#ifndef __COMMON_HSI__
#define __COMMON_HSI__
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 4
#define FW_REVISION_VERSION 2
@@ -111,7 +111,8 @@ static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
(u32)p_chain->cons_idx;
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
used -= (used / p_chain->elem_per_page);
used -= p_chain->prod_idx / p_chain->elem_per_page -
p_chain->cons_idx / p_chain->elem_per_page;
return p_chain->capacity - used;
}
@@ -19,6 +19,7 @@
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
@@ -339,10 +340,11 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
int rhashtable_init(struct rhashtable *ht,
const struct rhashtable_params *params);
int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
struct rhash_head *obj,
struct bucket_table *old_tbl);
int rhashtable_insert_rehash(struct rhashtable *ht);
struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
const void *key,
struct rhash_head *obj,
struct bucket_table *old_tbl);
int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
@@ -598,9 +600,11 @@ restart:
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(new_tbl)) {
err = rhashtable_insert_slow(ht, key, obj, new_tbl);
if (err == -EAGAIN)
tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
if (!IS_ERR_OR_NULL(tbl))
goto slow_path;
err = PTR_ERR(tbl);
goto out;
}
@@ -611,7 +615,7 @@ restart:
if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
spin_unlock_bh(lock);
err = rhashtable_insert_rehash(ht);
err = rhashtable_insert_rehash(ht, tbl);
rcu_read_unlock();
if (err)
return err;
@@ -71,7 +71,7 @@ struct scpi_ops {
int (*sensor_get_value)(u16, u32 *);
};
#if IS_ENABLED(CONFIG_ARM_SCPI_PROTOCOL)
#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL)
struct scpi_ops *get_scpi_ops(void);
#else
static inline struct scpi_ops *get_scpi_ops(void) { return NULL; }
@@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
extern int show_unhandled_signals;
extern int sigsuspend(sigset_t *);
struct sigaction {
#ifndef __ARCH_HAS_IRIX_SIGACTION
@@ -157,6 +157,24 @@ size_t ksize(const void *);
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
/*
* Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
* Intended for arches that get misalignment faults even for 64 bit integer
* aligned buffers.
*/
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
/*
* kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
* pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
* aligned pointers.
*/
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
/*
* Kmalloc array related definitions
*/
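The __assume_*_alignment annotations above appear to wrap the compiler's assume_aligned function attribute, so code using kmalloc()/kmem_cache_alloc() results can be optimised as if the pointers carried their minimum alignment. A rough user-space analogue (an assumption for illustration, not kernel code):

#include <stdlib.h>

/* Tells the compiler every pointer returned here is at least 16-byte aligned,
 * much like __assume_kmalloc_alignment does for kmalloc(). */
__attribute__((assume_aligned(16))) void *alloc16(size_t n)
{
	void *p = NULL;

	/* posix_memalign() really does return 16-byte-aligned memory. */
	if (posix_memalign(&p, 16, n))
		return NULL;
	return p;
}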
@@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size)
}
#endif /* !CONFIG_SLOB */
void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
void kmem_cache_free(struct kmem_cache *, void *);
/*
@@ -298,11 +316,11 @@ void kmem_cache_free(struct kmem_cache *, void *);
* Note that interrupts must be enabled when calling these functions.
*/
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
@@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
#endif
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
int node, size_t size);
int node, size_t size) __assume_slab_alignment;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
#endif /* CONFIG_TRACING */
extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
@@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
return __kmalloc_node(size, flags, node);
}
/*
* Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
* Intended for arches that get misalignment faults even for 64 bit integer
* aligned buffers.
*/
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
struct memcg_cache_array {
struct rcu_head rcu;
struct kmem_cache *entries[0];
@@ -99,7 +99,7 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
* grabbing every spinlock (and more). So the "read" side to such a
* lock is anything which disables preemption.
*/
#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/**
* stop_machine: freeze the machine on all CPUs and run this function
@@ -118,7 +118,7 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus);
#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
static inline int stop_machine(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
@@ -137,5 +137,5 @@ static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
return stop_machine(fn, data, cpus);
}
#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
#endif /* _LINUX_STOP_MACHINE */
@@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename,
asmlinkage long sys_lchown(const char __user *filename,
uid_t user, gid_t group);
asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
#ifdef CONFIG_UID16
#ifdef CONFIG_HAVE_UID16
asmlinkage long sys_chown16(const char __user *filename,
old_uid_t user, old_gid_t group);
asmlinkage long sys_lchown16(const char __user *filename,
@@ -438,7 +438,8 @@ static inline void thermal_zone_device_unregister(
static inline int thermal_zone_bind_cooling_device(
struct thermal_zone_device *tz, int trip,
struct thermal_cooling_device *cdev,
unsigned long upper, unsigned long lower)
unsigned long upper, unsigned long lower,
unsigned int weight)
{ return -ENODEV; }
static inline int thermal_zone_unbind_cooling_device(
struct thermal_zone_device *tz, int trip,
@@ -607,7 +607,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
/* tty_audit.c */
#ifdef CONFIG_AUDIT
extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
size_t size, unsigned icanon);
extern void tty_audit_exit(void);
extern void tty_audit_fork(struct signal_struct *sig);
@@ -615,8 +615,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
extern void tty_audit_push(struct tty_struct *tty);
extern int tty_audit_push_current(void);
#else
static inline void tty_audit_add_data(struct tty_struct *tty,
unsigned char *data, size_t size, unsigned icanon)
static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
size_t size, unsigned icanon)
{
}
static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
@@ -35,7 +35,7 @@ typedef __kernel_gid16_t gid16_t;
typedef unsigned long uintptr_t;
#ifdef CONFIG_UID16
#ifdef CONFIG_HAVE_UID16
/* This is defined by include/asm-{arch}/posix_types.h */
typedef __kernel_old_uid_t old_uid_t;
typedef __kernel_old_gid_t old_gid_t;
@@ -21,7 +21,7 @@
* Authors:
* Srikar Dronamraju
* Jim Keniston
* Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/errno.h>
@@ -47,4 +47,7 @@
/* device generates spurious wakeup, ignore remote wakeup capability */
#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9)
/* device can't handle Link Power Management */
#define USB_QUIRK_NO_LPM BIT(10)
#endif /* __LINUX_USB_QUIRKS_H */
@@ -44,9 +44,6 @@ struct vfio_device_ops {
void (*request)(void *device_data, unsigned int count);
};
extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev);
extern int vfio_add_group_dev(struct device *dev,
const struct vfio_device_ops *ops,
void *device_data);
@@ -145,7 +145,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
list_del(&old->task_list);
}
typedef int wait_bit_action_f(struct wait_bit_key *);
typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
@@ -960,10 +960,10 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
} while (0)
extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);
extern int bit_wait_timeout(struct wait_bit_key *);
extern int bit_wait_io_timeout(struct wait_bit_key *);
extern int bit_wait(struct wait_bit_key *, int);
extern int bit_wait_io(struct wait_bit_key *, int);
extern int bit_wait_timeout(struct wait_bit_key *, int);
extern int bit_wait_io_timeout(struct wait_bit_key *, int);
/**
* wait_on_bit - wait for a bit to be cleared