Merge tag 'v5.0-rc6' into devel
Linux 5.0-rc6
@@ -190,6 +190,7 @@ struct backing_dev_info {
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
	struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#else
	struct bdi_writeback_congested *wb_congested;
#endif

@@ -6,6 +6,7 @@

struct bcma_soc {
	struct bcma_bus bus;
	struct device *dev;
};

int __init bcma_host_soc_register(struct bcma_soc *soc);

@@ -287,7 +287,7 @@ enum req_opf {
	REQ_OP_DISCARD = 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE = 5,
	/* seset a zone write pointer */
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET = 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME = 7,

@@ -116,7 +116,13 @@ extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);

static inline sector_t blk_rq_trace_sector(struct request *rq)
{
	return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq);
	/*
	 * Tracing should ignore starting sector for passthrough requests and
	 * requests where starting sector didn't get set.
	 */
	if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
		return 0;
	return blk_rq_pos(rq);
}

static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)

@@ -172,6 +172,7 @@ struct bpf_verifier_state_list {
|
||||
#define BPF_ALU_SANITIZE_SRC 1U
|
||||
#define BPF_ALU_SANITIZE_DST 2U
|
||||
#define BPF_ALU_NEG_VALUE (1U << 2)
|
||||
#define BPF_ALU_NON_POINTER (1U << 3)
|
||||
#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
|
||||
BPF_ALU_SANITIZE_DST)
|
||||
|
||||
|
@@ -3,13 +3,22 @@
|
||||
#define _LINUX_BPFILTER_H
|
||||
|
||||
#include <uapi/linux/bpfilter.h>
|
||||
#include <linux/umh.h>
|
||||
|
||||
struct sock;
|
||||
int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
|
||||
unsigned int optlen);
|
||||
int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
|
||||
int __user *optlen);
|
||||
extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
|
||||
char __user *optval,
|
||||
unsigned int optlen, bool is_set);
|
||||
struct bpfilter_umh_ops {
|
||||
struct umh_info info;
|
||||
/* since ip_getsockopt() can run in parallel, serialize access to umh */
|
||||
struct mutex lock;
|
||||
int (*sockopt)(struct sock *sk, int optname,
|
||||
char __user *optval,
|
||||
unsigned int optlen, bool is_set);
|
||||
int (*start)(void);
|
||||
bool stop;
|
||||
};
|
||||
extern struct bpfilter_umh_ops bpfilter_ops;
|
||||
#endif
|
||||
|
@@ -35,6 +35,7 @@
|
||||
#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */
|
||||
#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
|
||||
#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */
|
||||
#define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */
|
||||
|
||||
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
|
||||
|
||||
@@ -53,7 +54,7 @@ struct ceph_options {
|
||||
unsigned long osd_request_timeout; /* jiffies */
|
||||
|
||||
/*
|
||||
* any type that can't be simply compared or doesn't need need
|
||||
* any type that can't be simply compared or doesn't need
|
||||
* to be compared should go beyond this point,
|
||||
* ceph_compare_options() should be updated accordingly
|
||||
*/
|
||||
@@ -281,7 +282,8 @@ extern struct ceph_options *ceph_parse_options(char *options,
|
||||
const char *dev_name, const char *dev_name_end,
|
||||
int (*parse_extra_token)(char *c, void *private),
|
||||
void *private);
|
||||
int ceph_print_client_options(struct seq_file *m, struct ceph_client *client);
|
||||
int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
|
||||
bool show_all);
|
||||
extern void ceph_destroy_options(struct ceph_options *opt);
|
||||
extern int ceph_compare_options(struct ceph_options *new_opt,
|
||||
struct ceph_client *client);
|
||||
|
@@ -354,7 +354,6 @@ struct ceph_osd_client {
|
||||
struct rb_root linger_map_checks;
|
||||
atomic_t num_requests;
|
||||
atomic_t num_homeless;
|
||||
bool abort_on_full; /* abort w/ ENOSPC when full */
|
||||
int abort_err;
|
||||
struct delayed_work timeout_work;
|
||||
struct delayed_work osds_timeout_work;
|
||||
|
@@ -3,9 +3,8 @@
|
||||
#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
|
||||
#endif
|
||||
|
||||
/* Some compiler specific definitions are overwritten here
|
||||
* for Clang compiler
|
||||
*/
|
||||
/* Compiler specific definitions for Clang compiler */
|
||||
|
||||
#define uninitialized_var(x) x = *(&(x))
|
||||
|
||||
/* same as gcc, this was present in clang-2.6 so we can assume it works
|
||||
|
@@ -58,17 +58,13 @@
|
||||
(typeof(ptr)) (__ptr + (off)); \
|
||||
})
|
||||
|
||||
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
|
||||
#define OPTIMIZER_HIDE_VAR(var) \
|
||||
__asm__ ("" : "=r" (var) : "0" (var))
|
||||
|
||||
/*
|
||||
* A trick to suppress uninitialized variable warning without generating any
|
||||
* code
|
||||
*/
|
||||
#define uninitialized_var(x) x = x
|
||||
|
||||
#ifdef RETPOLINE
|
||||
#ifdef CONFIG_RETPOLINE
|
||||
#define __noretpoline __attribute__((__indirect_branch__("keep")))
|
||||
#endif
|
||||
|
||||
|
@@ -5,9 +5,7 @@
|
||||
|
||||
#ifdef __ECC
|
||||
|
||||
/* Some compiler specific definitions are overwritten here
|
||||
* for Intel ECC compiler
|
||||
*/
|
||||
/* Compiler specific definitions for Intel ECC compiler */
|
||||
|
||||
#include <asm/intrinsics.h>
|
||||
|
||||
|
@@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) \
	__asm__ ("" : "=r" (var) : "0" (var))
#endif

/* Not-quite-unique ID. */

@@ -180,12 +180,10 @@ enum cpuhp_smt_control {
|
||||
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
|
||||
extern enum cpuhp_smt_control cpu_smt_control;
|
||||
extern void cpu_smt_disable(bool force);
|
||||
extern void cpu_smt_check_topology_early(void);
|
||||
extern void cpu_smt_check_topology(void);
|
||||
#else
|
||||
# define cpu_smt_control (CPU_SMT_ENABLED)
|
||||
static inline void cpu_smt_disable(bool force) { }
|
||||
static inline void cpu_smt_check_topology_early(void) { }
|
||||
static inline void cpu_smt_check_topology(void) { }
|
||||
#endif
|
||||
|
||||
|
@@ -62,9 +62,10 @@ extern const struct qstr slash_name;
struct dentry_stat_t {
	long nr_dentry;
	long nr_unused;
	long age_limit; /* age in seconds */
	long want_pages; /* pages requested by system */
	long dummy[2];
	long age_limit; /* age in seconds */
	long want_pages; /* pages requested by system */
	long nr_negative; /* # of unused negative dentries */
	long dummy; /* Reserved for future use */
};
extern struct dentry_stat_t dentry_stat;

@@ -717,15 +717,6 @@ static inline unsigned long dma_max_pfn(struct device *dev)
}
#endif

/*
 * Please always use dma_alloc_coherent instead as it already zeroes the memory!
 */
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag);
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN

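Editor's note: the dma-mapping hunk above removes the dma_zalloc_coherent() wrapper because dma_alloc_coherent() itself now returns zeroed memory. A minimal sketch of a migrated call site, assuming a hypothetical driver; the example_alloc_ring() name and ring_* parameters are placeholders, not taken from any real driver:

static int example_alloc_ring(struct device *dev, size_t ring_size,
			      void **ring_cpu, dma_addr_t *ring_dma)
{
	/* Allocate a coherent DMA ring buffer. */
	*ring_cpu = dma_alloc_coherent(dev, ring_size, ring_dma, GFP_KERNEL);
	if (!*ring_cpu)
		return -ENOMEM;
	/* No memset() needed: dma_alloc_coherent() already zeroes the buffer. */
	return 0;
}
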
@@ -653,6 +653,7 @@ extern int fb_new_modelist(struct fb_info *info);

extern struct fb_info *registered_fb[FB_MAX];
extern int num_registered_fb;
extern bool fb_center_logo;
extern struct class *fb_class;

#define for_each_registered_fb(i) \

@@ -591,8 +591,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
	return qdisc_skb_cb(skb)->data;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
					 struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
@@ -611,15 +611,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
	return res;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u32 res;

	preempt_disable();
	res = __bpf_prog_run_save_cb(prog, skb);
	preempt_enable();
	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u32 res;

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	return BPF_PROG_RUN(prog, skb);
	preempt_disable();
	res = BPF_PROG_RUN(prog, skb);
	preempt_enable();
	return res;
}

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,

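Editor's note: the filter.h hunk above splits the cb-saving helper into __bpf_prog_run_save_cb() plus a wrapper that brackets the BPF program run with preempt_disable()/preempt_enable(), so the program cannot migrate CPUs mid-run. A usage sketch assuming a caller holding a socket filter; example_skb_allowed() is hypothetical and the drop convention is only indicative:

static bool example_skb_allowed(const struct bpf_prog *prog, struct sk_buff *skb)
{
	/* Preemption is now handled inside the helper, not at the call site. */
	u32 verdict = bpf_prog_run_save_cb(prog, skb);

	return verdict != 0;	/* a zero verdict conventionally means "drop" */
}
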
@@ -1479,11 +1479,12 @@ struct super_block {
	struct user_namespace *s_user_ns;

	/*
	 * Keep the lru lists last in the structure so they always sit on their
	 * own individual cachelines.
	 * The list_lru structure is essentially just a pointer to a table
	 * of per-node lru lists, each of which has its own spinlock.
	 * There is no need to put them into separate cachelines.
	 */
	struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
	struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
	struct list_lru s_dentry_lru;
	struct list_lru s_inode_lru;
	struct rcu_head rcu;
	struct work_struct destroy_work;

@@ -24,7 +24,10 @@
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
|
||||
#include <linux/kfifo.h>
|
||||
|
||||
#define HID_DEBUG_BUFSIZE 512
|
||||
#define HID_DEBUG_FIFOSIZE 512
|
||||
|
||||
void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
|
||||
void hid_dump_report(struct hid_device *, int , u8 *, int);
|
||||
@@ -37,11 +40,8 @@ void hid_debug_init(void);
|
||||
void hid_debug_exit(void);
|
||||
void hid_debug_event(struct hid_device *, char *);
|
||||
|
||||
|
||||
struct hid_debug_list {
|
||||
char *hid_debug_buf;
|
||||
int head;
|
||||
int tail;
|
||||
DECLARE_KFIFO_PTR(hid_debug_fifo, char);
|
||||
struct fasync_struct *fasync;
|
||||
struct hid_device *hdev;
|
||||
struct list_head node;
|
||||
@@ -64,4 +64,3 @@ struct hid_debug_list {
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
|
@@ -430,7 +430,7 @@ struct hid_local {
|
||||
*/
|
||||
|
||||
struct hid_collection {
|
||||
struct hid_collection *parent;
|
||||
int parent_idx; /* device->collection */
|
||||
unsigned type;
|
||||
unsigned usage;
|
||||
unsigned level;
|
||||
@@ -658,7 +658,6 @@ struct hid_parser {
|
||||
unsigned int *collection_stack;
|
||||
unsigned int collection_stack_ptr;
|
||||
unsigned int collection_stack_size;
|
||||
struct hid_collection *active_collection;
|
||||
struct hid_device *device;
|
||||
unsigned int scan_flags;
|
||||
};
|
||||
|
@@ -1159,8 +1159,9 @@ struct hv_ring_buffer_debug_info {
|
||||
u32 bytes_avail_towrite;
|
||||
};
|
||||
|
||||
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
|
||||
struct hv_ring_buffer_debug_info *debug_info);
|
||||
|
||||
int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
|
||||
struct hv_ring_buffer_debug_info *debug_info);
|
||||
|
||||
/* Vmbus interface */
|
||||
#define vmbus_driver_register(driver) \
|
||||
|
@@ -615,6 +615,7 @@ struct ide_drive_s {
|
||||
|
||||
/* current sense rq and buffer */
|
||||
bool sense_rq_armed;
|
||||
bool sense_rq_active;
|
||||
struct request *sense_rq;
|
||||
struct request_sense sense_data;
|
||||
|
||||
@@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
|
||||
extern void ide_timer_expiry(struct timer_list *t);
|
||||
extern irqreturn_t ide_intr(int irq, void *dev_id);
|
||||
extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
|
||||
extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
|
||||
extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
|
||||
|
||||
void ide_init_disk(struct gendisk *, ide_drive_t *);
|
||||
|
@@ -54,6 +54,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
|
||||
case ARPHRD_IPGRE:
|
||||
case ARPHRD_VOID:
|
||||
case ARPHRD_NONE:
|
||||
case ARPHRD_RAWIP:
|
||||
return false;
|
||||
default:
|
||||
return true;
|
||||
|
@@ -260,6 +260,7 @@ struct irq_affinity {
|
||||
/**
|
||||
* struct irq_affinity_desc - Interrupt affinity descriptor
|
||||
* @mask: cpumask to hold the affinity assignment
|
||||
* @is_managed: 1 if the interrupt is managed internally
|
||||
*/
|
||||
struct irq_affinity_desc {
|
||||
struct cpumask mask;
|
||||
|
@@ -319,7 +319,7 @@
#define GITS_TYPER_PLPIS (1UL << 0)
#define GITS_TYPER_VLPIS (1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)

@@ -160,6 +160,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
|
||||
}
|
||||
|
||||
enum nvdimm_security_state {
|
||||
NVDIMM_SECURITY_ERROR = -1,
|
||||
NVDIMM_SECURITY_DISABLED,
|
||||
NVDIMM_SECURITY_UNLOCKED,
|
||||
NVDIMM_SECURITY_LOCKED,
|
||||
@@ -234,7 +235,6 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
|
||||
cmd_mask, num_flush, flush_wpq, NULL, NULL);
|
||||
}
|
||||
|
||||
int nvdimm_security_setup_events(struct nvdimm *nvdimm);
|
||||
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
|
||||
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
|
||||
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
|
||||
|
@@ -21,14 +21,16 @@ struct vmem_altmap;
 * walkers which rely on the fully initialized page->flags and others
 * should use this rather than pfn_valid && pfn_to_page
 */
#define pfn_to_online_page(pfn) \
({ \
	struct page *___page = NULL; \
	unsigned long ___nr = pfn_to_section_nr(pfn); \
 \
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
		___page = pfn_to_page(pfn); \
	___page; \
#define pfn_to_online_page(pfn) \
({ \
	struct page *___page = NULL; \
	unsigned long ___pfn = pfn; \
	unsigned long ___nr = pfn_to_section_nr(___pfn); \
 \
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
	    pfn_valid_within(___pfn)) \
		___page = pfn_to_page(___pfn); \
	___page; \
})

/*

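Editor's note: the memory_hotplug hunk above makes pfn_to_online_page() also check pfn_valid_within(), so holes inside an otherwise online section are rejected. A short sketch of a pfn walker built on it; example_walk_pfns() is illustrative only:

static void example_walk_pfns(unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		struct page *page = pfn_to_online_page(pfn);

		if (!page)
			continue;	/* offline section, or a hole within one */
		/* ... safe to inspect page->flags here ... */
	}
}
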
@@ -2790,6 +2790,100 @@ struct ec_response_battery_vendor_param {
|
||||
uint32_t value;
|
||||
} __packed;
|
||||
|
||||
/*****************************************************************************/
|
||||
/* Commands for I2S recording on audio codec. */
|
||||
|
||||
#define EC_CMD_CODEC_I2S 0x00BC
|
||||
|
||||
enum ec_codec_i2s_subcmd {
|
||||
EC_CODEC_SET_SAMPLE_DEPTH = 0x0,
|
||||
EC_CODEC_SET_GAIN = 0x1,
|
||||
EC_CODEC_GET_GAIN = 0x2,
|
||||
EC_CODEC_I2S_ENABLE = 0x3,
|
||||
EC_CODEC_I2S_SET_CONFIG = 0x4,
|
||||
EC_CODEC_I2S_SET_TDM_CONFIG = 0x5,
|
||||
EC_CODEC_I2S_SET_BCLK = 0x6,
|
||||
};
|
||||
|
||||
enum ec_sample_depth_value {
|
||||
EC_CODEC_SAMPLE_DEPTH_16 = 0,
|
||||
EC_CODEC_SAMPLE_DEPTH_24 = 1,
|
||||
};
|
||||
|
||||
enum ec_i2s_config {
|
||||
EC_DAI_FMT_I2S = 0,
|
||||
EC_DAI_FMT_RIGHT_J = 1,
|
||||
EC_DAI_FMT_LEFT_J = 2,
|
||||
EC_DAI_FMT_PCM_A = 3,
|
||||
EC_DAI_FMT_PCM_B = 4,
|
||||
EC_DAI_FMT_PCM_TDM = 5,
|
||||
};
|
||||
|
||||
struct ec_param_codec_i2s {
|
||||
/*
|
||||
* enum ec_codec_i2s_subcmd
|
||||
*/
|
||||
uint8_t cmd;
|
||||
union {
|
||||
/*
|
||||
* EC_CODEC_SET_SAMPLE_DEPTH
|
||||
* Value should be one of ec_sample_depth_value.
|
||||
*/
|
||||
uint8_t depth;
|
||||
|
||||
/*
|
||||
* EC_CODEC_SET_GAIN
|
||||
* Value should be 0~43 for both channels.
|
||||
*/
|
||||
struct ec_param_codec_i2s_set_gain {
|
||||
uint8_t left;
|
||||
uint8_t right;
|
||||
} __packed gain;
|
||||
|
||||
/*
|
||||
* EC_CODEC_I2S_ENABLE
|
||||
* 1 to enable, 0 to disable.
|
||||
*/
|
||||
uint8_t i2s_enable;
|
||||
|
||||
/*
|
||||
* EC_CODEC_I2S_SET_COFNIG
|
||||
* Value should be one of ec_i2s_config.
|
||||
*/
|
||||
uint8_t i2s_config;
|
||||
|
||||
/*
|
||||
* EC_CODEC_I2S_SET_TDM_CONFIG
|
||||
* Value should be one of ec_i2s_config.
|
||||
*/
|
||||
struct ec_param_codec_i2s_tdm {
|
||||
/*
|
||||
* 0 to 496
|
||||
*/
|
||||
int16_t ch0_delay;
|
||||
/*
|
||||
* -1 to 496
|
||||
*/
|
||||
int16_t ch1_delay;
|
||||
uint8_t adjacent_to_ch0;
|
||||
uint8_t adjacent_to_ch1;
|
||||
} __packed tdm_param;
|
||||
|
||||
/*
|
||||
* EC_CODEC_I2S_SET_BCLK
|
||||
*/
|
||||
uint32_t bclk;
|
||||
};
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* For subcommand EC_CODEC_GET_GAIN.
|
||||
*/
|
||||
struct ec_response_codec_gain {
|
||||
uint8_t left;
|
||||
uint8_t right;
|
||||
} __packed;
|
||||
|
||||
/*****************************************************************************/
|
||||
/* System commands */
|
||||
|
||||
|
@@ -41,7 +41,7 @@
|
||||
#define TCU_TCSR_PRESCALE_LSB 3
|
||||
#define TCU_TCSR_PRESCALE_MASK 0x38
|
||||
|
||||
#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown abruptly 1: gracefully */
|
||||
#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown gracefully 1: abruptly */
|
||||
#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */
|
||||
#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */
|
||||
|
||||
|
@@ -15,6 +15,7 @@
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/mfd/madera/pdata.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/regmap.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
@@ -37,6 +38,8 @@ enum madera_type {
|
||||
|
||||
#define MADERA_MAX_MICBIAS 4
|
||||
|
||||
#define MADERA_MAX_HP_OUTPUT 3
|
||||
|
||||
/* Notifier events */
|
||||
#define MADERA_NOTIFY_VOICE_TRIGGER 0x1
|
||||
#define MADERA_NOTIFY_HPDET 0x2
|
||||
@@ -183,6 +186,10 @@ struct madera {
|
||||
unsigned int num_childbias[MADERA_MAX_MICBIAS];
|
||||
|
||||
struct snd_soc_dapm_context *dapm;
|
||||
struct mutex dapm_ptr_lock;
|
||||
unsigned int hp_ena;
|
||||
bool out_clamp[MADERA_MAX_HP_OUTPUT];
|
||||
bool out_shorted[MADERA_MAX_HP_OUTPUT];
|
||||
|
||||
struct blocking_notifier_head notifier;
|
||||
};
|
||||
|
@@ -78,6 +78,8 @@
|
||||
#define STEPCONFIG_YNN BIT(8)
|
||||
#define STEPCONFIG_XNP BIT(9)
|
||||
#define STEPCONFIG_YPN BIT(10)
|
||||
#define STEPCONFIG_RFP(val) ((val) << 12)
|
||||
#define STEPCONFIG_RFP_VREFP (0x3 << 12)
|
||||
#define STEPCONFIG_INM_MASK (0xF << 15)
|
||||
#define STEPCONFIG_INM(val) ((val) << 15)
|
||||
#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
|
||||
@@ -86,6 +88,8 @@
|
||||
#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4)
|
||||
#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
|
||||
#define STEPCONFIG_FIFO1 BIT(26)
|
||||
#define STEPCONFIG_RFM(val) ((val) << 23)
|
||||
#define STEPCONFIG_RFM_VREFN (0x3 << 23)
|
||||
|
||||
/* Delay register */
|
||||
#define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
|
||||
|
@@ -79,7 +79,7 @@
|
||||
/* Some controllers have a CBSY bit */
|
||||
#define TMIO_MMC_HAVE_CBSY BIT(11)
|
||||
|
||||
/* Some controllers that support HS400 use use 4 taps while others use 8. */
|
||||
/* Some controllers that support HS400 use 4 taps while others use 8. */
|
||||
#define TMIO_MMC_HAVE_4TAP_HS400 BIT(13)
|
||||
|
||||
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
|
||||
|
@@ -520,6 +520,12 @@ enum pgdat_flags {
	PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
				 * Cleared when kswapd is woken.
				 */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);

@@ -828,7 +828,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */

#ifdef RETPOLINE
#ifdef CONFIG_RETPOLINE
extern bool retpoline_module_ok(bool has_retpoline);
#else
static inline bool retpoline_module_ok(bool has_retpoline)

@@ -1483,6 +1483,7 @@ struct net_device_ops {
|
||||
* @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
|
||||
* @IFF_FAILOVER: device is a failover master device
|
||||
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
|
||||
* @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
|
||||
*/
|
||||
enum netdev_priv_flags {
|
||||
IFF_802_1Q_VLAN = 1<<0,
|
||||
@@ -1514,6 +1515,7 @@ enum netdev_priv_flags {
|
||||
IFF_NO_RX_HANDLER = 1<<26,
|
||||
IFF_FAILOVER = 1<<27,
|
||||
IFF_FAILOVER_SLAVE = 1<<28,
|
||||
IFF_L3MDEV_RX_HANDLER = 1<<29,
|
||||
};
|
||||
|
||||
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
|
||||
@@ -1544,6 +1546,7 @@ enum netdev_priv_flags {
|
||||
#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
|
||||
#define IFF_FAILOVER IFF_FAILOVER
|
||||
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
|
||||
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
|
||||
|
||||
/**
|
||||
* struct net_device - The DEVICE structure.
|
||||
@@ -4549,6 +4552,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
|
||||
return dev->priv_flags & IFF_SUPP_NOFCS;
|
||||
}
|
||||
|
||||
static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
|
||||
{
|
||||
return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
|
||||
}
|
||||
|
||||
static inline bool netif_is_l3_master(const struct net_device *dev)
|
||||
{
|
||||
return dev->priv_flags & IFF_L3MDEV_MASTER;
|
||||
|
@@ -50,7 +50,6 @@ struct of_irq_controller;
|
||||
|
||||
struct device_node {
|
||||
const char *name;
|
||||
const char *type;
|
||||
phandle phandle;
|
||||
const char *full_name;
|
||||
struct fwnode_handle fwnode;
|
||||
|
@@ -24,7 +24,7 @@ static inline void *
pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
		      dma_addr_t *dma_handle)
{
	return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
	return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}

static inline void

@@ -48,6 +48,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
|
||||
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
|
||||
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
|
||||
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
|
||||
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
|
||||
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
|
||||
|
||||
#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
|
||||
@@ -56,6 +57,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini
|
||||
#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
|
||||
#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
|
||||
#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
|
||||
#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
|
||||
#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
|
||||
|
||||
extern const int phy_10_100_features_array[4];
|
||||
@@ -467,8 +469,8 @@ struct phy_device {
|
||||
* only works for PHYs with IDs which match this field
|
||||
* name: The friendly name of this PHY type
|
||||
* phy_id_mask: Defines the important bits of the phy_id
|
||||
* features: A list of features (speed, duplex, etc) supported
|
||||
* by this PHY
|
||||
* features: A mandatory list of features (speed, duplex, etc)
|
||||
* supported by this PHY
|
||||
* flags: A bitfield defining certain other features this PHY
|
||||
* supports (like interrupts)
|
||||
*
|
||||
|
@@ -42,6 +42,7 @@ enum phy_mode {
|
||||
PHY_MODE_PCIE,
|
||||
PHY_MODE_ETHERNET,
|
||||
PHY_MODE_MIPI_DPHY,
|
||||
PHY_MODE_SATA
|
||||
};
|
||||
|
||||
/**
|
||||
|
@@ -108,6 +108,7 @@ void dev_pm_opp_put(struct dev_pm_opp *opp);
|
||||
int dev_pm_opp_add(struct device *dev, unsigned long freq,
|
||||
unsigned long u_volt);
|
||||
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
|
||||
void dev_pm_opp_remove_all_dynamic(struct device *dev);
|
||||
|
||||
int dev_pm_opp_enable(struct device *dev, unsigned long freq);
|
||||
|
||||
@@ -217,6 +218,10 @@ static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
|
||||
{
|
||||
return 0;
|
||||
|
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)

static inline void pm_runtime_mark_last_busy(struct device *dev)
{
	WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get()));
	WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
}

static inline bool pm_runtime_is_irq_safe(struct device *dev)

@@ -13,6 +13,7 @@
|
||||
#ifndef __QCOM_SCM_H
|
||||
#define __QCOM_SCM_H
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/cpumask.h>
|
||||
|
||||
|
@@ -663,6 +663,37 @@ out:
|
||||
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
|
||||
u32 prod_idx, void *p_prod_elem)
|
||||
{
|
||||
if (p_chain->mode == QED_CHAIN_MODE_PBL) {
|
||||
u32 cur_prod, page_mask, page_cnt, page_diff;
|
||||
|
||||
cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
|
||||
p_chain->u.chain32.prod_idx;
|
||||
|
||||
/* Assume that number of elements in a page is power of 2 */
|
||||
page_mask = ~p_chain->elem_per_page_mask;
|
||||
|
||||
/* Use "cur_prod - 1" and "prod_idx - 1" since producer index
|
||||
* reaches the first element of next page before the page index
|
||||
* is incremented. See qed_chain_produce().
|
||||
* Index wrap around is not a problem because the difference
|
||||
* between current and given producer indices is always
|
||||
* positive and lower than the chain's capacity.
|
||||
*/
|
||||
page_diff = (((cur_prod - 1) & page_mask) -
|
||||
((prod_idx - 1) & page_mask)) /
|
||||
p_chain->elem_per_page;
|
||||
|
||||
page_cnt = qed_chain_get_page_cnt(p_chain);
|
||||
if (is_chain_u16(p_chain))
|
||||
p_chain->pbl.c.u16.prod_page_idx =
|
||||
(p_chain->pbl.c.u16.prod_page_idx -
|
||||
page_diff + page_cnt) % page_cnt;
|
||||
else
|
||||
p_chain->pbl.c.u32.prod_page_idx =
|
||||
(p_chain->pbl.c.u32.prod_page_idx -
|
||||
page_diff + page_cnt) % page_cnt;
|
||||
}
|
||||
|
||||
if (is_chain_u16(p_chain))
|
||||
p_chain->u.chain16.prod_idx = (u16) prod_idx;
|
||||
else
|
||||
|
@@ -32,6 +32,8 @@ struct reset_control *devm_reset_control_array_get(struct device *dev,
|
||||
struct reset_control *of_reset_control_array_get(struct device_node *np,
|
||||
bool shared, bool optional);
|
||||
|
||||
int reset_control_get_count(struct device *dev);
|
||||
|
||||
#else
|
||||
|
||||
static inline int reset_control_reset(struct reset_control *rstc)
|
||||
@@ -97,6 +99,11 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional)
|
||||
return optional ? NULL : ERR_PTR(-ENOTSUPP);
|
||||
}
|
||||
|
||||
static inline int reset_control_get_count(struct device *dev)
|
||||
{
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_RESET_CONTROLLER */
|
||||
|
||||
static inline int __must_check device_reset(struct device *dev)
|
||||
@@ -138,7 +145,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
|
||||
*
|
||||
* Returns a struct reset_control or IS_ERR() condition containing errno.
|
||||
* This function is intended for use with reset-controls which are shared
|
||||
* between hardware-blocks.
|
||||
* between hardware blocks.
|
||||
*
|
||||
* When a reset-control is shared, the behavior of reset_control_assert /
|
||||
* deassert is changed, the reset-core will keep track of a deassert_count
|
||||
@@ -187,7 +194,7 @@ static inline struct reset_control *of_reset_control_get_exclusive(
|
||||
}
|
||||
|
||||
/**
|
||||
* of_reset_control_get_shared - Lookup and obtain an shared reference
|
||||
* of_reset_control_get_shared - Lookup and obtain a shared reference
|
||||
* to a reset controller.
|
||||
* @node: device to be reset by the controller
|
||||
* @id: reset line name
|
||||
@@ -229,7 +236,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index(
|
||||
}
|
||||
|
||||
/**
|
||||
* of_reset_control_get_shared_by_index - Lookup and obtain an shared
|
||||
* of_reset_control_get_shared_by_index - Lookup and obtain a shared
|
||||
* reference to a reset controller
|
||||
* by index.
|
||||
* @node: device to be reset by the controller
|
||||
@@ -322,7 +329,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
|
||||
|
||||
/**
|
||||
* devm_reset_control_get_shared_by_index - resource managed
|
||||
* reset_control_get_shared
|
||||
* reset_control_get_shared
|
||||
* @dev: device to be reset by the controller
|
||||
* @index: index of the reset controller
|
||||
*
|
||||
|
@@ -995,7 +995,7 @@ struct task_struct {
|
||||
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
|
||||
struct list_head cg_list;
|
||||
#endif
|
||||
#ifdef CONFIG_RESCTRL
|
||||
#ifdef CONFIG_X86_CPU_RESCTRL
|
||||
u32 closid;
|
||||
u32 rmid;
|
||||
#endif
|
||||
@@ -1406,6 +1406,7 @@ extern struct pid *cad_pid;
|
||||
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
|
||||
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
|
||||
#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
|
||||
#define PF_UMH 0x02000000 /* I'm an Usermodehelper process */
|
||||
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
|
||||
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
|
||||
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
|
||||
@@ -1904,6 +1905,14 @@ static inline void rseq_execve(struct task_struct *t)
|
||||
|
||||
#endif
|
||||
|
||||
void __exit_umh(struct task_struct *tsk);
|
||||
|
||||
static inline void exit_umh(struct task_struct *tsk)
|
||||
{
|
||||
if (unlikely(tsk->flags & PF_UMH))
|
||||
__exit_umh(tsk);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_RSEQ
|
||||
|
||||
void rseq_syscall(struct pt_regs *regs);
|
||||
|
@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
|
||||
#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
|
||||
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
|
||||
#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
|
||||
#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
|
||||
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
|
||||
|
||||
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
|
||||
|
@@ -24,9 +24,13 @@
|
||||
* called near the end of a function. Otherwise, the list can be
|
||||
* re-initialized for later re-use by wake_q_init().
|
||||
*
|
||||
* Note that this can cause spurious wakeups. schedule() callers
|
||||
* NOTE that this can cause spurious wakeups. schedule() callers
|
||||
* must ensure the call is done inside a loop, confirming that the
|
||||
* wakeup condition has in fact occurred.
|
||||
*
|
||||
* NOTE that there is no guarantee the wakeup will happen any later than the
|
||||
* wake_q_add() location. Therefore task must be ready to be woken at the
|
||||
* location of the wake_q_add().
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
|
@@ -392,7 +392,7 @@ extern bool unhandled_signal(struct task_struct *tsk, int sig);
#endif

#define siginmask(sig, mask) \
	((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
	((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))

#define SIG_KERNEL_ONLY_MASK (\
	rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))

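Editor's note: the signal.h hunk above adds a "(sig) > 0" guard to siginmask(). The effect is easiest to see through one of the helpers built on it; sig_kernel_only() already exists in this header and is repeated here only for illustration, the commentary is editorial:

#define sig_kernel_only(sig)	siginmask(sig, SIG_KERNEL_ONLY_MASK)

/*
 * Before the change, sig_kernel_only(0) expanded to rt_sigmask(0),
 * i.e. a shift by -1, which is undefined behaviour. With the added
 * "(sig) > 0" guard the expression short-circuits and simply yields
 * false for sig <= 0.
 */
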
@@ -3218,6 +3218,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
|
||||
*
|
||||
* This is exactly the same as pskb_trim except that it ensures the
|
||||
* checksum of received packets are still valid after the operation.
|
||||
* It can change skb pointers.
|
||||
*/
|
||||
|
||||
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
|
||||
|
@@ -184,6 +184,7 @@ struct plat_stmmacenet_data {
|
||||
struct clk *pclk;
|
||||
struct clk *clk_ptp_ref;
|
||||
unsigned int clk_ptp_rate;
|
||||
unsigned int clk_ref_rate;
|
||||
struct reset_control *stmmac_rst;
|
||||
struct stmmac_axi *axi;
|
||||
int has_gmac4;
|
||||
|
@@ -47,6 +47,8 @@ struct umh_info {
|
||||
const char *cmdline;
|
||||
struct file *pipe_to_umh;
|
||||
struct file *pipe_from_umh;
|
||||
struct list_head list;
|
||||
void (*cleanup)(struct umh_info *info);
|
||||
pid_t pid;
|
||||
};
|
||||
int fork_usermode_blob(void *data, size_t len, struct umh_info *info);
|
||||
|
@@ -12,6 +12,11 @@ struct irq_affinity;
|
||||
|
||||
/**
|
||||
* virtio_config_ops - operations for configuring a virtio device
|
||||
* Note: Do not assume that a transport implements all of the operations
|
||||
* getting/setting a value as a simple read/write! Generally speaking,
|
||||
* any of @get/@set, @get_status/@set_status, or @get_features/
|
||||
* @finalize_features are NOT safe to be called from an atomic
|
||||
* context.
|
||||
* @get: read the value of a configuration field
|
||||
* vdev: the virtio_device
|
||||
* offset: the offset of the configuration field
|
||||
@@ -22,7 +27,7 @@ struct irq_affinity;
|
||||
* offset: the offset of the configuration field
|
||||
* buf: the buffer to read the field value from.
|
||||
* len: the length of the buffer
|
||||
* @generation: config generation counter
|
||||
* @generation: config generation counter (optional)
|
||||
* vdev: the virtio_device
|
||||
* Returns the config generation counter
|
||||
* @get_status: read the status byte
|
||||
@@ -48,17 +53,17 @@ struct irq_affinity;
|
||||
* @del_vqs: free virtqueues found by find_vqs().
|
||||
* @get_features: get the array of feature bits for this device.
|
||||
* vdev: the virtio_device
|
||||
* Returns the first 32 feature bits (all we currently need).
|
||||
* Returns the first 64 feature bits (all we currently need).
|
||||
* @finalize_features: confirm what device features we'll be using.
|
||||
* vdev: the virtio_device
|
||||
* This gives the final feature bits for the device: it can change
|
||||
* the dev->feature bits if it wants.
|
||||
* Returns 0 on success or error status
|
||||
* @bus_name: return the bus name associated with the device
|
||||
* @bus_name: return the bus name associated with the device (optional)
|
||||
* vdev: the virtio_device
|
||||
* This returns a pointer to the bus name a la pci_name from which
|
||||
* the caller can then copy.
|
||||
* @set_vq_affinity: set the affinity for a virtqueue.
|
||||
* @set_vq_affinity: set the affinity for a virtqueue (optional).
|
||||
* @get_vq_affinity: get the affinity for a virtqueue (optional).
|
||||
*/
|
||||
typedef void vq_callback_t(struct virtqueue *);
|
||||
|
@@ -176,7 +176,8 @@ static inline bool xa_is_internal(const void *entry)
 */
static inline bool xa_is_err(const void *entry)
{
	return unlikely(xa_is_internal(entry));
	return unlikely(xa_is_internal(entry) &&
			entry >= xa_mk_internal(-MAX_ERRNO));
}

/**

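Editor's note: the xa_is_err() hunk above narrows the check so that only internal entries in the errno range count as errors. A usage sketch of the intended calling pattern, not taken from the tree; example_store() is hypothetical:

static int example_store(struct xarray *xa, unsigned long index, void *item)
{
	/* xa_store() returns the previous entry, or an errno-encoded entry. */
	void *old = xa_store(xa, index, item, GFP_KERNEL);

	if (xa_is_err(old))
		return xa_err(old);	/* e.g. -ENOMEM */
	return 0;
}
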
@@ -286,7 +287,6 @@ struct xarray {
|
||||
*/
|
||||
#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
|
||||
|
||||
void xa_init_flags(struct xarray *, gfp_t flags);
|
||||
void *xa_load(struct xarray *, unsigned long index);
|
||||
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
|
||||
void *xa_erase(struct xarray *, unsigned long index);
|
||||
@@ -303,6 +303,24 @@ unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
|
||||
unsigned long max, unsigned int n, xa_mark_t);
|
||||
void xa_destroy(struct xarray *);
|
||||
|
||||
/**
|
||||
* xa_init_flags() - Initialise an empty XArray with flags.
|
||||
* @xa: XArray.
|
||||
* @flags: XA_FLAG values.
|
||||
*
|
||||
* If you need to initialise an XArray with special flags (eg you need
|
||||
* to take the lock from interrupt context), use this function instead
|
||||
* of xa_init().
|
||||
*
|
||||
* Context: Any context.
|
||||
*/
|
||||
static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
|
||||
{
|
||||
spin_lock_init(&xa->xa_lock);
|
||||
xa->xa_flags = flags;
|
||||
xa->xa_head = NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* xa_init() - Initialise an empty XArray.
|
||||
* @xa: XArray.
|
||||
@@ -342,20 +360,45 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
|
||||
}
|
||||
|
||||
/**
|
||||
* xa_for_each() - Iterate over a portion of an XArray.
|
||||
* xa_for_each_start() - Iterate over a portion of an XArray.
|
||||
* @xa: XArray.
|
||||
* @entry: Entry retrieved from array.
|
||||
* @index: Index of @entry.
|
||||
* @max: Maximum index to retrieve from array.
|
||||
* @filter: Selection criterion.
|
||||
* @entry: Entry retrieved from array.
|
||||
* @start: First index to retrieve from array.
|
||||
*
|
||||
* Initialise @index to the lowest index you want to retrieve from the
|
||||
* array. During the iteration, @entry will have the value of the entry
|
||||
* stored in @xa at @index. The iteration will skip all entries in the
|
||||
* array which do not match @filter. You may modify @index during the
|
||||
* iteration if you want to skip or reprocess indices. It is safe to modify
|
||||
* the array during the iteration. At the end of the iteration, @entry will
|
||||
* be set to NULL and @index will have a value less than or equal to max.
|
||||
* During the iteration, @entry will have the value of the entry stored
|
||||
* in @xa at @index. You may modify @index during the iteration if you
|
||||
* want to skip or reprocess indices. It is safe to modify the array
|
||||
* during the iteration. At the end of the iteration, @entry will be set
|
||||
* to NULL and @index will have a value less than or equal to max.
|
||||
*
|
||||
* xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have
|
||||
* to handle your own locking with xas_for_each(), and if you have to unlock
|
||||
* after each iteration, it will also end up being O(n.log(n)).
|
||||
* xa_for_each_start() will spin if it hits a retry entry; if you intend to
|
||||
* see retry entries, you should use the xas_for_each() iterator instead.
|
||||
* The xas_for_each() iterator will expand into more inline code than
|
||||
* xa_for_each_start().
|
||||
*
|
||||
* Context: Any context. Takes and releases the RCU lock.
|
||||
*/
|
||||
#define xa_for_each_start(xa, index, entry, start) \
|
||||
for (index = start, \
|
||||
entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \
|
||||
entry; \
|
||||
entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
|
||||
|
||||
/**
|
||||
* xa_for_each() - Iterate over present entries in an XArray.
|
||||
* @xa: XArray.
|
||||
* @index: Index of @entry.
|
||||
* @entry: Entry retrieved from array.
|
||||
*
|
||||
* During the iteration, @entry will have the value of the entry stored
|
||||
* in @xa at @index. You may modify @index during the iteration if you want
|
||||
* to skip or reprocess indices. It is safe to modify the array during the
|
||||
* iteration. At the end of the iteration, @entry will be set to NULL and
|
||||
* @index will have a value less than or equal to max.
|
||||
*
|
||||
* xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
|
||||
* to handle your own locking with xas_for_each(), and if you have to unlock
|
||||
@@ -366,9 +409,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
|
||||
*
|
||||
* Context: Any context. Takes and releases the RCU lock.
|
||||
*/
|
||||
#define xa_for_each(xa, entry, index, max, filter) \
|
||||
for (entry = xa_find(xa, &index, max, filter); entry; \
|
||||
entry = xa_find_after(xa, &index, max, filter))
|
||||
#define xa_for_each(xa, index, entry) \
|
||||
xa_for_each_start(xa, index, entry, 0)
|
||||
|
||||
/**
|
||||
* xa_for_each_marked() - Iterate over marked entries in an XArray.
|
||||
* @xa: XArray.
|
||||
* @index: Index of @entry.
|
||||
* @entry: Entry retrieved from array.
|
||||
* @filter: Selection criterion.
|
||||
*
|
||||
* During the iteration, @entry will have the value of the entry stored
|
||||
* in @xa at @index. The iteration will skip all entries in the array
|
||||
* which do not match @filter. You may modify @index during the iteration
|
||||
* if you want to skip or reprocess indices. It is safe to modify the array
|
||||
* during the iteration. At the end of the iteration, @entry will be set to
|
||||
* NULL and @index will have a value less than or equal to max.
|
||||
*
|
||||
* xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
|
||||
* You have to handle your own locking with xas_for_each(), and if you have
|
||||
* to unlock after each iteration, it will also end up being O(n.log(n)).
|
||||
* xa_for_each_marked() will spin if it hits a retry entry; if you intend to
|
||||
* see retry entries, you should use the xas_for_each_marked() iterator
|
||||
* instead. The xas_for_each_marked() iterator will expand into more inline
|
||||
* code than xa_for_each_marked().
|
||||
*
|
||||
* Context: Any context. Takes and releases the RCU lock.
|
||||
*/
|
||||
#define xa_for_each_marked(xa, index, entry, filter) \
|
||||
for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
|
||||
entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
|
||||
|
||||
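Editor's note: the reworked iteration macros above change the argument order (index before entry) and move the max/filter parameters into xa_for_each_start()/xa_for_each_marked(). A minimal usage sketch of the new xa_for_each() signature; example_dump() is hypothetical:

static void example_dump(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_for_each(xa, index, entry)
		pr_info("index %lu -> %p\n", index, entry);
}
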
#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
|
||||
#define xa_lock(xa) spin_lock(&(xa)->xa_lock)
|
||||
@@ -393,39 +463,12 @@ void *__xa_erase(struct xarray *, unsigned long index);
|
||||
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
|
||||
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
|
||||
void *entry, gfp_t);
|
||||
int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
|
||||
int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
|
||||
int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
|
||||
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
|
||||
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
|
||||
|
||||
/**
|
||||
* __xa_insert() - Store this entry in the XArray unless another entry is
|
||||
* already present.
|
||||
* @xa: XArray.
|
||||
* @index: Index into array.
|
||||
* @entry: New entry.
|
||||
* @gfp: Memory allocation flags.
|
||||
*
|
||||
* If you would rather see the existing entry in the array, use __xa_cmpxchg().
|
||||
* This function is for users who don't care what the entry is, only that
|
||||
* one is present.
|
||||
*
|
||||
* Context: Any context. Expects xa_lock to be held on entry. May
|
||||
* release and reacquire xa_lock if the @gfp flags permit.
|
||||
* Return: 0 if the store succeeded. -EEXIST if another entry was present.
|
||||
* -ENOMEM if memory could not be allocated.
|
||||
*/
|
||||
static inline int __xa_insert(struct xarray *xa, unsigned long index,
|
||||
void *entry, gfp_t gfp)
|
||||
{
|
||||
void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
|
||||
if (!curr)
|
||||
return 0;
|
||||
if (xa_is_err(curr))
|
||||
return xa_err(curr);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
/**
|
||||
* xa_store_bh() - Store this entry in the XArray.
|
||||
* @xa: XArray.
|
||||
@@ -453,7 +496,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
|
||||
}
|
||||
|
||||
/**
|
||||
* xa_store_irq() - Erase this entry from the XArray.
|
||||
* xa_store_irq() - Store this entry in the XArray.
|
||||
* @xa: XArray.
|
||||
* @index: Index into array.
|
||||
* @entry: New entry.
|
||||
@@ -615,24 +658,83 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
|
||||
* @entry: New entry.
|
||||
* @gfp: Memory allocation flags.
|
||||
*
|
||||
* If you would rather see the existing entry in the array, use xa_cmpxchg().
|
||||
* This function is for users who don't care what the entry is, only that
|
||||
* one is present.
|
||||
* Inserting a NULL entry will store a reserved entry (like xa_reserve())
|
||||
* if no entry is present. Inserting will fail if a reserved entry is
|
||||
* present, even though loading from this index will return NULL.
|
||||
*
|
||||
* Context: Process context. Takes and releases the xa_lock.
|
||||
* May sleep if the @gfp flags permit.
|
||||
* Context: Any context. Takes and releases the xa_lock. May sleep if
|
||||
* the @gfp flags permit.
|
||||
* Return: 0 if the store succeeded. -EEXIST if another entry was present.
|
||||
* -ENOMEM if memory could not be allocated.
|
||||
*/
|
||||
static inline int xa_insert(struct xarray *xa, unsigned long index,
|
||||
void *entry, gfp_t gfp)
|
||||
{
|
||||
void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
|
||||
if (!curr)
|
||||
return 0;
|
||||
if (xa_is_err(curr))
|
||||
return xa_err(curr);
|
||||
return -EEXIST;
|
||||
int err;
|
||||
|
||||
xa_lock(xa);
|
||||
err = __xa_insert(xa, index, entry, gfp);
|
||||
xa_unlock(xa);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* xa_insert_bh() - Store this entry in the XArray unless another entry is
|
||||
* already present.
|
||||
* @xa: XArray.
|
||||
* @index: Index into array.
|
||||
* @entry: New entry.
|
||||
* @gfp: Memory allocation flags.
|
||||
*
|
||||
* Inserting a NULL entry will store a reserved entry (like xa_reserve())
|
||||
* if no entry is present. Inserting will fail if a reserved entry is
|
||||
* present, even though loading from this index will return NULL.
|
||||
*
|
||||
* Context: Any context. Takes and releases the xa_lock while
|
||||
* disabling softirqs. May sleep if the @gfp flags permit.
|
||||
* Return: 0 if the store succeeded. -EEXIST if another entry was present.
|
||||
* -ENOMEM if memory could not be allocated.
|
||||
*/
|
||||
static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
|
||||
void *entry, gfp_t gfp)
|
||||
{
|
||||
int err;
|
||||
|
||||
xa_lock_bh(xa);
|
||||
err = __xa_insert(xa, index, entry, gfp);
|
||||
xa_unlock_bh(xa);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* xa_insert_irq() - Store this entry in the XArray unless another entry is
|
||||
* already present.
|
||||
* @xa: XArray.
|
||||
* @index: Index into array.
|
||||
* @entry: New entry.
|
||||
* @gfp: Memory allocation flags.
|
||||
*
|
||||
* Inserting a NULL entry will store a reserved entry (like xa_reserve())
|
||||
* if no entry is present. Inserting will fail if a reserved entry is
|
||||
* present, even though loading from this index will return NULL.
|
||||
*
|
||||
* Context: Process context. Takes and releases the xa_lock while
|
||||
* disabling interrupts. May sleep if the @gfp flags permit.
|
||||
* Return: 0 if the store succeeded. -EEXIST if another entry was present.
|
||||
* -ENOMEM if memory could not be allocated.
|
||||
*/
|
||||
static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
|
||||
void *entry, gfp_t gfp)
|
||||
{
|
||||
int err;
|
||||
|
||||
xa_lock_irq(xa);
|
||||
err = __xa_insert(xa, index, entry, gfp);
|
||||
xa_unlock_irq(xa);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -970,8 +1072,8 @@ static inline bool xa_is_sibling(const void *entry)
		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
}

#define XA_ZERO_ENTRY xa_mk_internal(256)
#define XA_RETRY_ENTRY xa_mk_internal(257)
#define XA_RETRY_ENTRY xa_mk_internal(256)
#define XA_ZERO_ENTRY xa_mk_internal(257)

/**
 * xa_is_zero() - Is the entry a zero entry?
@@ -995,6 +1097,17 @@ static inline bool xa_is_retry(const void *entry)
	return unlikely(entry == XA_RETRY_ENTRY);
}

/**
 * xa_is_advanced() - Is the entry only permitted for the advanced API?
 * @entry: Entry to be stored in the XArray.
 *
 * Return: %true if the entry cannot be stored by the normal API.
 */
static inline bool xa_is_advanced(const void *entry)
{
	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
}

/**
 * typedef xa_update_node_t - A callback function from the XArray.
 * @node: The node which is being processed