Merge tag 'asoc-v5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-next
ASoC: Updates for v5.4

Quite a big update this time around, particularly in the core where we've had a lot of cleanups from Morimoto-san - there's not much functional change but quite a bit of modernization going on. We've also seen a lot of driver work, much of it cleanups but also substantial work on individual drivers.

- Lots and lots of cleanups from Morimoto-san and Yue Haibing.
- Lots of cleanups and enhancements to the Freescale, sunxi and Intel drivers.
- Initial Sound Open Firmware support for i.MX8.
- Removal of the w90x900 and nuc900 drivers as the platforms are being removed.
- New support for Cirrus Logic CS47L15 and CS47L92, Freescale i.MX 7ULP and 8MQ, Meson G12A and NXP UDA1334.
@@ -181,6 +181,7 @@ struct blkcg_policy {
 
 extern struct blkcg blkcg_root;
+extern struct cgroup_subsys_state * const blkcg_root_css;
 extern bool blkcg_debug_stats;
 
 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
@@ -170,6 +170,8 @@ struct ccp_aes_engine {
 	enum ccp_aes_mode mode;
 	enum ccp_aes_action action;
 
+	u32 authsize;
+
 	struct scatterlist *key;
 	u32 key_len;		/* In bytes */
 
@@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
 
 static inline void ceph_buffer_put(struct ceph_buffer *b)
 {
-	kref_put(&b->kref, ceph_buffer_release);
+	if (b)
+		kref_put(&b->kref, ceph_buffer_release);
 }
 
 extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
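The ceph_buffer_put() change above is a one-line hardening fix: kref_put() dereferences its first argument, so the helper now tolerates a NULL buffer the way kfree() does. A minimal userspace sketch of the pattern (illustrative names only, not the kernel's kref API):

/* Userspace model of a NULL-safe "put": dropping the last reference
 * frees the object, and a NULL pointer is silently ignored. */
#include <stdio.h>
#include <stdlib.h>

struct buffer {
	int refcount;
};

static void buffer_put(struct buffer *b)
{
	if (b && --b->refcount == 0) {	/* NULL-safe, like the fixed helper */
		printf("releasing buffer\n");
		free(b);
	}
}

int main(void)
{
	struct buffer *b = malloc(sizeof(*b));

	b->refcount = 1;
	buffer_put(NULL);	/* no-op instead of a NULL dereference */
	buffer_put(b);		/* drops the last reference and frees */
	return 0;
}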
@@ -359,6 +359,7 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
 /**
  * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
  * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
  * @clks: pointer to the clk_bulk_data table of consumer
  *
  * Behaves the same as devm_clk_bulk_get() except where there is no clock
@@ -24,7 +24,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 			long ______r;					\
 			static struct ftrace_likely_data		\
 				__aligned(4)				\
-				__section("_ftrace_annotated_branch")	\
+				__section(_ftrace_annotated_branch)	\
 			______f = {					\
 				.data.func = __func__,			\
 				.data.file = __FILE__,			\
@@ -60,7 +60,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 #define __trace_if_value(cond) ({			\
 	static struct ftrace_branch_data		\
 		__aligned(4)				\
-		__section("_ftrace_branch")		\
+		__section(_ftrace_branch)		\
 		__if_trace = {				\
 			.func = __func__,		\
 			.file = __FILE__,		\
@@ -118,7 +118,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 		".popsection\n\t"
 
 /* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(".rodata..c_jump_table")
+#define __annotate_jump_table __section(.rodata..c_jump_table)
 
 #else
 #define annotate_reachable()
@@ -298,7 +298,7 @@ unsigned long read_word_at_a_time(const void *addr)
  * visible to the compiler.
  */
 #define __ADDRESSABLE(sym) \
-	static void * __section(".discard.addressable") __used \
+	static void * __section(.discard.addressable) __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
 
 /**
@@ -50,7 +50,6 @@ struct cn_dev {
 
 	u32 seq, groups;
 	struct sock *nls;
-	void (*input) (struct sk_buff *skb);
 
 	struct cn_queue_dev *cbdev;
 };
@@ -145,7 +145,11 @@ struct cred {
 	struct user_struct *user;	/* real user ID subscription */
 	struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
 	struct group_info *group_info;	/* supplementary groups for euid/fsgid */
-	struct rcu_head	rcu;		/* RCU deletion hook */
+	/* RCU deletion */
+	union {
+		int non_rcu;			/* Can we skip RCU deletion? */
+		struct rcu_head	rcu;		/* RCU deletion hook */
+	};
 } __randomize_layout;
 
 extern void __put_cred(struct cred *);
@@ -246,6 +250,7 @@ static inline const struct cred *get_cred(const struct cred *cred)
 	if (!cred)
 		return cred;
 	validate_creds(cred);
+	nonconst_cred->non_rcu = 0;
 	return get_new_cred(nonconst_cred);
 }
 
@@ -257,6 +262,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
 	if (!atomic_inc_not_zero(&nonconst_cred->usage))
 		return NULL;
 	validate_creds(cred);
+	nonconst_cred->non_rcu = 0;
 	return cred;
 }
 
@@ -915,6 +915,8 @@ struct dev_links_info {
  *		This identifies the device type and carries type-specific
  *		information.
  * @mutex:	Mutex to synchronize calls to its driver.
+ * @lockdep_mutex: An optional debug lock that a subsystem can use as a
+ *		peer lock to gain localized lockdep coverage of the device_lock.
  * @bus:	Type of bus device is on.
  * @driver:	Which driver has allocated this
  * @platform_data: Platform data specific to the device.
@@ -998,6 +1000,9 @@ struct device {
					   core doesn't touch it */
 	void		*driver_data;	/* Driver data, set and get with
					   dev_set_drvdata/dev_get_drvdata */
+#ifdef CONFIG_PROVE_LOCKING
+	struct mutex		lockdep_mutex;
+#endif
 	struct mutex		mutex;	/* mutex to synchronize calls to
					 * its driver.
					 */
@@ -1383,6 +1388,7 @@ extern int (*platform_notify_remove)(struct device *dev);
  */
 extern struct device *get_device(struct device *dev);
 extern void put_device(struct device *dev);
+extern bool kill_device(struct device *dev);
 
 #ifdef CONFIG_DEVTMPFS
 extern int devtmpfs_create_node(struct device *dev);
@@ -272,62 +272,6 @@ dim_update_sample_with_comps(u16 event_ctr, u64 packets, u64 bytes, u64 comps,
 
 /* Net DIM */
 
-/*
- * Net DIM profiles:
- *        There are different set of profiles for each CQ period mode.
- *        There are different set of profiles for RX/TX CQs.
- *        Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
- */
-#define NET_DIM_PARAMS_NUM_PROFILES 5
-#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
-#define NET_DIM_DEF_PROFILE_CQE 1
-#define NET_DIM_DEF_PROFILE_EQE 1
-
-#define NET_DIM_RX_EQE_PROFILES { \
-	{1,   NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{8,   NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{64,  NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-}
-
-#define NET_DIM_RX_CQE_PROFILES { \
-	{2,  256},             \
-	{8,  128},             \
-	{16, 64},              \
-	{32, 64},              \
-	{64, 64}               \
-}
-
-#define NET_DIM_TX_EQE_PROFILES { \
-	{1,   NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE},  \
-	{8,   NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE},  \
-	{32,  NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE},  \
-	{64,  NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE},  \
-	{128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}   \
-}
-
-#define NET_DIM_TX_CQE_PROFILES { \
-	{5,  128},  \
-	{8,  64},   \
-	{16, 32},   \
-	{32, 32},   \
-	{64, 32}    \
-}
-
-static const struct dim_cq_moder
-rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
-	NET_DIM_RX_EQE_PROFILES,
-	NET_DIM_RX_CQE_PROFILES,
-};
-
-static const struct dim_cq_moder
-tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
-	NET_DIM_TX_EQE_PROFILES,
-	NET_DIM_TX_CQE_PROFILES,
-};
-
 /**
  * net_dim_get_rx_moderation - provide a CQ moderation object for the given RX profile
  * @cq_period_mode: CQ period mode
@@ -160,10 +160,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
 {
-	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
-	size_t align = get_order(PAGE_ALIGN(size));
-
-	return alloc_pages_node(node, gfp, align);
+	return NULL;
 }
 
 static inline void dma_free_contiguous(struct device *dev, struct page *page,
@@ -689,8 +689,8 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
  */
 static inline bool dma_addressing_limited(struct device *dev)
 {
-	return min_not_zero(*dev->dma_mask, dev->bus_dma_mask) <
-			    dma_get_required_mask(dev);
+	return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
+			    dma_get_required_mask(dev);
 }
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
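The dma_addressing_limited() fix swaps the raw *dev->dma_mask dereference for dma_get_mask(), which tolerates an unset mask, while keeping the same comparison: the effective mask is the narrower of the device and bus masks (a zero bus mask meaning "no constraint"), and the device is limited if that is below what the platform's memory requires. A hedged userspace sketch of the comparison - the sample mask values are made up:

/* Standalone illustration of min_not_zero() and the limited-addressing
 * test; not kernel code. */
#include <stdio.h>

typedef unsigned long long u64;

static u64 min_not_zero(u64 a, u64 b)
{
	return a == 0 ? b : (b == 0 ? a : (a < b ? a : b));
}

int main(void)
{
	u64 dev_mask = 0xffffffffULL;		/* 32-bit capable device */
	u64 bus_mask = 0;			/* no extra bus constraint */
	u64 required = 0xfffffffffULL;		/* RAM spans 36 bits */

	int limited = min_not_zero(dev_mask, bus_mask) < required;
	printf("addressing limited: %d\n", limited);	/* 1: bouncing needed */
	return 0;
}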
@@ -42,13 +42,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr);
 
-#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs);
+#ifdef CONFIG_MMU
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
 #else
-# define arch_dma_mmap_pgprot(dev, prot, attrs)	pgprot_noncached(prot)
-#endif
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+		unsigned long attrs)
+{
+	return prot;	/* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
 
 #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
@@ -45,7 +45,6 @@ struct elevator_mq_ops {
 	struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
 	bool (*has_work)(struct blk_mq_hw_ctx *);
 	void (*completed_request)(struct request *, u64);
-	void (*started_request)(struct request *);
 	void (*requeue_request)(struct request *);
 	struct request *(*former_request)(struct request_queue *, struct request *);
 	struct request *(*next_request)(struct request_queue *, struct request *);
@@ -24,6 +24,7 @@
 
 #include <net/sch_generic.h>
 
+#include <asm/byteorder.h>
 #include <uapi/linux/filter.h>
 #include <uapi/linux/bpf.h>
 
@@ -747,6 +748,18 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 	return size <= size_default && (size & (size - 1)) == 0;
 }
 
+static inline u8
+bpf_ctx_narrow_load_shift(u32 off, u32 size, u32 size_default)
+{
+	u8 load_off = off & (size_default - 1);
+
+#ifdef __LITTLE_ENDIAN
+	return load_off * 8;
+#else
+	return (size_default - (load_off + size)) * 8;
+#endif
+}
+
 #define bpf_ctx_wide_access_ok(off, size, type, field)			\
	(size == sizeof(__u64) &&					\
	off >= offsetof(type, field) &&					\
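The new bpf_ctx_narrow_load_shift() helper computes the bit shift needed to extract a narrow field from the full-width context load, and its two branches differ only in where the requested bytes land within the word. A quick userspace sketch of the arithmetic (assumed names, not kernel code):

/* Evaluate the shift both branches would return for a 2-byte narrow
 * load at byte offset 6 inside an 8-byte field (size_default = 8). */
#include <stdio.h>

typedef unsigned int u32;
typedef unsigned char u8;

static u8 shift_le(u32 off, u32 size, u32 size_default)
{
	u8 load_off = off & (size_default - 1);
	return load_off * 8;				/* __LITTLE_ENDIAN branch */
}

static u8 shift_be(u32 off, u32 size, u32 size_default)
{
	u8 load_off = off & (size_default - 1);
	return (size_default - (load_off + size)) * 8;	/* big-endian branch */
}

int main(void)
{
	/* Little endian shifts by 48 bits; big endian by 0 bits,
	 * because the high-offset bytes sit at the top of the word. */
	printf("LE: %u  BE: %u\n", shift_le(6, 2, 8), shift_be(6, 2, 8));
	return 0;
}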
@@ -2598,6 +2598,12 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					       void *holder);
 extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
					      void *holder);
+extern struct block_device *bd_start_claiming(struct block_device *bdev,
+					      void *holder);
+extern void bd_finish_claiming(struct block_device *bdev,
+		struct block_device *whole, void *holder);
+extern void bd_abort_claiming(struct block_device *bdev,
+		struct block_device *whole, void *holder);
 extern void blkdev_put(struct block_device *bdev, fmode_t mode);
 extern int __blkdev_reread_part(struct block_device *bdev);
 extern int blkdev_reread_part(struct block_device *bdev);
@@ -510,22 +510,18 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
 }
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
-			int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
-	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
+			int node);
 #else
 #define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
-	alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 #define alloc_page_vma(gfp_mask, vma, addr)			\
-	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
+	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
 #define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
-	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
+	alloc_pages_vma(gfp_mask, 0, vma, addr, node)
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
@@ -241,30 +241,6 @@ static inline int irq_to_gpio(unsigned irq)
	return -EINVAL;
 }
 
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
-		       unsigned int gpio_offset, unsigned int pin_offset,
-		       unsigned int npins)
-{
-	WARN_ON(1);
-	return -EINVAL;
-}
-
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
-			struct pinctrl_dev *pctldev,
-			unsigned int gpio_offset, const char *pin_group)
-{
-	WARN_ON(1);
-	return -EINVAL;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
-	WARN_ON(1);
-}
-
 static inline int devm_gpio_request(struct device *dev, unsigned gpio,
				    const char *label)
 {
@@ -247,7 +247,7 @@ static inline void gpiod_put(struct gpio_desc *desc)
	might_sleep();
 
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 }
 
 static inline void devm_gpiod_unhinge(struct device *dev,
@@ -256,7 +256,7 @@ static inline void devm_gpiod_unhinge(struct device *dev,
	might_sleep();
 
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 }
 
 static inline void gpiod_put_array(struct gpio_descs *descs)
@@ -264,7 +264,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
	might_sleep();
 
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(descs);
 }
 
 static inline struct gpio_desc *__must_check
@@ -317,7 +317,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
	might_sleep();
 
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 }
 
 static inline void devm_gpiod_put_array(struct device *dev,
@@ -326,32 +326,32 @@ static inline void devm_gpiod_put_array(struct device *dev,
	might_sleep();
 
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(descs);
 }
 
 
 static inline int gpiod_get_direction(const struct gpio_desc *desc)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return -ENOSYS;
 }
 static inline int gpiod_direction_input(struct gpio_desc *desc)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return -ENOSYS;
 }
 static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return -ENOSYS;
 }
 static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return -ENOSYS;
 }
 
@@ -359,7 +359,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
 static inline int gpiod_get_value(const struct gpio_desc *desc)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return 0;
 }
 static inline int gpiod_get_array_value(unsigned int array_size,
@@ -368,13 +368,13 @@ static inline int gpiod_get_array_value(unsigned int array_size,
					unsigned long *value_bitmap)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
	return 0;
 }
 static inline void gpiod_set_value(struct gpio_desc *desc, int value)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 }
 static inline int gpiod_set_array_value(unsigned int array_size,
					struct gpio_desc **desc_array,
@@ -382,13 +382,13 @@ static inline int gpiod_set_array_value(unsigned int array_size,
					unsigned long *value_bitmap)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
	return 0;
 }
 static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return 0;
 }
 static inline int gpiod_get_raw_array_value(unsigned int array_size,
@@ -397,13 +397,13 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
					    unsigned long *value_bitmap)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
	return 0;
 }
 static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 }
 static inline int gpiod_set_raw_array_value(unsigned int array_size,
					    struct gpio_desc **desc_array,
@@ -411,14 +411,14 @@ static inline int gpiod_set_raw_array_value(unsigned int array_size,
					    unsigned long *value_bitmap)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
	return 0;
 }
 
 static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return 0;
 }
 static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
@@ -427,13 +427,13 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
					unsigned long *value_bitmap)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
	return 0;
 }
 static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 }
 static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
						struct gpio_desc **desc_array,
@@ -441,13 +441,13 @@ static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
						unsigned long *value_bitmap)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
	return 0;
 }
 static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return 0;
 }
 static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
@@ -456,14 +456,14 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
					       unsigned long *value_bitmap)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
	return 0;
 }
 static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
						int value)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
 }
 static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
						struct gpio_desc **desc_array,
@@ -471,41 +471,41 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
						unsigned long *value_bitmap)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc_array);
	return 0;
 }
 
 static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return -ENOSYS;
 }
 
 static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return -ENOSYS;
 }
 
 static inline int gpiod_is_active_low(const struct gpio_desc *desc)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return 0;
 }
 static inline int gpiod_cansleep(const struct gpio_desc *desc)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return 0;
 }
 
 static inline int gpiod_to_irq(const struct gpio_desc *desc)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return -EINVAL;
 }
 
@@ -513,7 +513,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
					  const char *name)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return -EINVAL;
 }
 
@@ -525,7 +525,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
 static inline int desc_to_gpio(const struct gpio_desc *desc)
 {
	/* GPIO can never have been requested */
-	WARN_ON(1);
+	WARN_ON(desc);
	return -EINVAL;
 }
 
@@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
  */
 #define HMM_RANGE_DEFAULT_TIMEOUT 1000
 
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline bool hmm_vma_range_done(struct hmm_range *range)
-{
-	bool ret = hmm_range_valid(range);
-
-	hmm_range_unregister(range);
-	return ret;
-}
-
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline int hmm_vma_fault(struct hmm_mirror *mirror,
-				struct hmm_range *range, bool block)
-{
-	long ret;
-
-	/*
-	 * With the old API the driver must set each individual entries with
-	 * the requested flags (valid, write, ...). So here we set the mask to
-	 * keep intact the entries provided by the driver and zero out the
-	 * default_flags.
-	 */
-	range->default_flags = 0;
-	range->pfn_flags_mask = -1UL;
-
-	ret = hmm_range_register(range, mirror,
-				 range->start, range->end,
-				 PAGE_SHIFT);
-	if (ret)
-		return (int)ret;
-
-	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
-		/*
-		 * The mmap_sem was taken by driver we release it here and
-		 * returns -EAGAIN which correspond to mmap_sem have been
-		 * drop in the old API.
-		 */
-		up_read(&range->vma->vm_mm->mmap_sem);
-		return -EAGAIN;
-	}
-
-	ret = hmm_range_fault(range, block);
-	if (ret <= 0) {
-		if (ret == -EBUSY || !ret) {
-			/* Same as above, drop mmap_sem to match old API. */
-			up_read(&range->vma->vm_mm->mmap_sem);
-			ret = -EBUSY;
-		} else if (ret == -EAGAIN)
-			ret = -EBUSY;
-		hmm_range_unregister(range);
-		return ret;
-	}
-	return 0;
-}
-
 /* Below are for HMM internal use only! Not to be used by device driver! */
 static inline void hmm_mm_init(struct mm_struct *mm)
 {
@@ -80,6 +80,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
 extern void unregister_pppox_proto(int proto_num);
 extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
 extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+#define PPPOEIOCSFWD32    _IOW(0xB1 ,0, compat_size_t)
 
 /* PPPoX socket states */
 enum {
@@ -41,11 +41,11 @@ struct rmnet_map_ul_csum_header {
	__be16 csum_start_offset;
 #if defined(__LITTLE_ENDIAN_BITFIELD)
	u16 csum_insert_offset:14;
-	u16 udp_ip4_ind:1;
+	u16 udp_ind:1;
	u16 csum_enabled:1;
 #elif defined (__BIG_ENDIAN_BITFIELD)
	u16 csum_enabled:1;
-	u16 udp_ip4_ind:1;
+	u16 udp_ind:1;
	u16 csum_insert_offset:14;
 #else
 #error "Please fix <asm/byteorder.h>"
@@ -48,7 +48,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
	{ "ELAN0618", 0 },
	{ "ELAN0619", 0 },
	{ "ELAN061A", 0 },
-	{ "ELAN061B", 0 },
+/*	{ "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
	{ "ELAN061C", 0 },
	{ "ELAN061D", 0 },
	{ "ELAN061E", 0 },
@@ -346,7 +346,6 @@ enum {
 #define QI_PC_PASID_SEL		(QI_PC_TYPE | QI_PC_GRAN(1))
 
 #define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_GL(gl)	(((u64)gl) << 7)
 #define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
 #define QI_EIOTLB_AM(am)	(((u64)am))
 #define QI_EIOTLB_PASID(pasid)	(((u64)pasid) << 32)
@@ -378,8 +377,6 @@ enum {
 #define QI_RESP_INVALID		0x1
 #define QI_RESP_FAILURE		0xf
 
-#define QI_GRAN_ALL_ALL		0
-#define QI_GRAN_NONG_ALL	1
 #define QI_GRAN_NONG_PASID	2
 #define QI_GRAN_PSI_PASID	3
 
@@ -155,6 +155,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn);
+bool has_iova_flush_queue(struct iova_domain *iovad);
 int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -235,6 +236,11 @@ static inline void init_iova_domain(struct iova_domain *iovad,
 {
 }
 
+static inline bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+	return false;
+}
+
 static inline int init_iova_flush_queue(struct iova_domain *iovad,
					iova_flush_cb flush_cb,
					iova_entry_dtor entry_dtor)
@@ -94,11 +94,11 @@ struct keyring_index_key {
	union {
		struct {
 #ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */
-			u8	desc_len;
-			char	desc[sizeof(long) - 1];	/* First few chars of description */
+			u16	desc_len;
+			char	desc[sizeof(long) - 2];	/* First few chars of description */
 #else
-			char	desc[sizeof(long) - 1];	/* First few chars of description */
-			u8	desc_len;
+			char	desc[sizeof(long) - 2];	/* First few chars of description */
+			u16	desc_len;
 #endif
		};
		unsigned long x;
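The keyring change widens desc_len from u8 to u16 (descriptions can exceed 255 bytes), which costs one byte of the description prefix so the pair still packs into the single word x that keyrings compare in one operation. A userspace sketch of that packing trick on a little-endian LP64 host - the names are illustrative, not the kernel's:

/* One unsigned long holds both the length and a short prefix of the
 * description, so equality of 'x' compares both at once. */
#include <stdio.h>
#include <string.h>

union index_word {
	struct {
		unsigned short desc_len;	/* was unsigned char */
		char desc[sizeof(long) - 2];	/* was sizeof(long) - 1 */
	};
	unsigned long x;
};

int main(void)
{
	union index_word a = { .desc_len = 300 };	/* needs > 8 bits */
	union index_word b = { .desc_len = 300 };

	memcpy(a.desc, "keyrin", 6);
	memcpy(b.desc, "keyrin", 6);

	/* A single word compare covers both length and prefix. */
	printf("equal: %d\n", a.x == b.x);
	return 0;
}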
@@ -861,8 +861,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
 
-bool kvm_arch_has_vcpu_debugfs(void);
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
+void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+#endif
 
 int kvm_arch_hardware_enable(void);
 void kvm_arch_hardware_disable(void);
@@ -872,6 +873,7 @@ int kvm_arch_check_processor_compat(void);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
 /*
@@ -117,6 +117,7 @@ struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
 unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
			resource_size_t hw_addr, resource_size_t size);
 int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
 resource_size_t logic_pio_to_hwaddr(unsigned long pio);
 unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
 
@@ -668,6 +668,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 
 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);
+void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
@@ -1072,6 +1073,14 @@ static inline void mod_lruvec_page_state(struct page *page,
	mod_node_page_state(page_pgdat(page), idx, val);
 }
 
+static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+					   int val)
+{
+	struct page *page = virt_to_head_page(p);
+
+	__mod_node_page_state(page_pgdat(page), idx, val);
+}
+
 static inline
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
@@ -1159,6 +1168,16 @@ static inline void __dec_lruvec_page_state(struct page *page,
	__mod_lruvec_page_state(page, idx, -1);
 }
 
+static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+	__mod_lruvec_slab_state(p, idx, 1);
+}
+
+static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+	__mod_lruvec_slab_state(p, idx, -1);
+}
+
 /* idx can be of type enum memcg_stat_item or node_stat_item */
 static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   int idx)
@@ -139,6 +139,8 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 struct mempolicy *get_task_policy(struct task_struct *p);
 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+		unsigned long addr);
 bool vma_policy_mof(struct vm_area_struct *vma);
 
 extern void numa_default_policy(void);
@@ -446,11 +446,11 @@ enum {
 };
 
 enum {
-	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x20,
+	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
 };
 
 enum {
-	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x20,
+	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
 };
 
 enum {
@@ -220,6 +220,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
 
 struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
 void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse);
 int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
@@ -5975,10 +5975,12 @@ struct mlx5_ifc_modify_cq_in_bits {
 
	struct mlx5_ifc_cqc_bits cq_context;
 
-	u8         reserved_at_280[0x40];
+	u8         reserved_at_280[0x60];
 
	u8         cq_umem_valid[0x1];
-	u8         reserved_at_2c1[0x5bf];
+	u8         reserved_at_2e1[0x1f];
+
+	u8         reserved_at_300[0x580];
 
	u8         pas[0][0x40];
 };
@@ -10052,9 +10054,8 @@ struct mlx5_ifc_tls_static_params_bits {
 };
 
 struct mlx5_ifc_tls_progress_params_bits {
-	u8         valid[0x1];
-	u8         reserved_at_1[0x7];
-	u8         pd[0x18];
+	u8         reserved_at_0[0x8];
+	u8         tisn[0x18];
 
	u8         next_record_tcp_sn[0x20];
 
@@ -159,7 +159,16 @@ struct page {
			/** @pgmap: Points to the hosting device page map. */
			struct dev_pagemap *pgmap;
			void *zone_device_data;
-			unsigned long _zd_pad_1;	/* uses mapping */
+			/*
+			 * ZONE_DEVICE private pages are counted as being
+			 * mapped so the next 3 words hold the mapping, index,
+			 * and private fields from the source anonymous or
+			 * page cache page while the page is migrated to device
+			 * private memory.
+			 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
+			 * use the mapping, index, and private fields when
+			 * pmem backed DAX files are mapped.
+			 */
		};
 
		/** @rcu_head: You can use this to free a page by RCU. */
@@ -215,8 +215,9 @@ enum node_stat_item {
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
-	NR_SLAB_RECLAIMABLE,
-	NR_SLAB_UNRECLAIMABLE,
+	NR_SLAB_RECLAIMABLE,	/* Please do not reorder this item */
+	NR_SLAB_UNRECLAIMABLE,	/* and this one without looking at
+				 * memcg_flush_percpu_vmstats() first. */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
@@ -814,6 +814,7 @@ struct tee_client_device_id {
 /**
  * struct wmi_device_id - WMI device identifier
  * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @context: pointer to driver specific data
  */
 struct wmi_device_id {
	const char guid_string[UUID_STRING_LEN+1];
@@ -1,7 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /****************************************************************************
- * ip_conntrack_h323_asn1.h - BER and PER decoding library for H.323
- *			      conntrack/NAT module.
+ * BER and PER decoding library for H.323 conntrack/NAT module.
  *
  * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net>
 *
@@ -4,6 +4,9 @@
  * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
  */
 
+#ifndef _NF_CONNTRACK_H323_TYPES_H
+#define _NF_CONNTRACK_H323_TYPES_H
+
 typedef struct TransportAddress_ipAddress {	/* SEQUENCE */
	int options;		/* No use */
	unsigned int ip;
@@ -931,3 +934,5 @@ typedef struct RasMessage {	/* CHOICE */
		InfoRequestResponse infoRequestResponse;
	};
 } RasMessage;
+
+#endif /* _NF_CONNTRACK_H323_TYPES_H */
@@ -1164,7 +1164,7 @@ static inline int of_property_read_string_index(const struct device_node *np,
 }
 
 /**
- * of_property_read_bool - Findfrom a property
+ * of_property_read_bool - Find a property
  * @np:		device node from which the property value is to be read.
  * @propname:	name of the property to be searched.
  *
@@ -32,6 +32,7 @@
 
 #endif /* CONFIG_SPARSEMEM */
 
+#ifndef BUILD_VDSO32_64
 /*
  * page->flags layout:
  *
@@ -76,20 +77,22 @@
 #define LAST_CPUPID_SHIFT 0
 #endif
 
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#ifdef CONFIG_KASAN_SW_TAGS
+#define KASAN_TAG_WIDTH 8
+#else
+#define KASAN_TAG_WIDTH 0
+#endif
+
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
+	<= BITS_PER_LONG - NR_PAGEFLAGS
 #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
 #else
 #define LAST_CPUPID_WIDTH 0
 #endif
 
-#ifdef CONFIG_KASAN_SW_TAGS
-#define KASAN_TAG_WIDTH 8
 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
	> BITS_PER_LONG - NR_PAGEFLAGS
-#error "KASAN: not enough bits in page flags for tag"
-#endif
-#else
-#define KASAN_TAG_WIDTH 0
+#error "Not enough bits in page flags"
 #endif
 
 /*
@@ -104,4 +107,5 @@
 #define LAST_CPUPID_NOT_IN_PAGE_FLAGS
 #endif
 
+#endif
 #endif /* _LINUX_PAGE_FLAGS_LAYOUT */
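The page-flags-layout change makes the KASAN tag width part of the up-front budget: every field packed into page->flags (section, zone, node, last_cpupid, KASAN tag) must fit in the bits left after the flag bits, and last_cpupid is the field that gets evicted when the budget runs out. A standalone sketch of the same preprocessor arithmetic - the width values below are made-up stand-ins, not any real config:

/* Toy bit-budget check mirroring the header's logic. */
#include <stdio.h>

#define BITS_PER_LONG	64
#define NR_PAGEFLAGS	23	/* illustrative; varies by config */

#define SECTIONS_WIDTH	0
#define NODES_SHIFT	10
#define ZONES_WIDTH	2
#define LAST_CPUPID_SHIFT 21
#define KASAN_TAG_WIDTH	8	/* 0 when CONFIG_KASAN_SW_TAGS is off */

#if SECTIONS_WIDTH + ZONES_WIDTH + NODES_SHIFT + LAST_CPUPID_SHIFT + \
	KASAN_TAG_WIDTH <= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH 0	/* last_cpupid falls out of page->flags */
#endif

int main(void)
{
	printf("spare bits after flags: %d, LAST_CPUPID_WIDTH: %d\n",
	       BITS_PER_LONG - NR_PAGEFLAGS -
	       (SECTIONS_WIDTH + ZONES_WIDTH + NODES_SHIFT + KASAN_TAG_WIDTH),
	       LAST_CPUPID_WIDTH);
	return 0;
}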
@@ -152,6 +152,8 @@ enum pageflags {
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
+	/* Remapped by swiotlb-xen. */
+	PG_xen_remapped = PG_owner_priv_1,
 
	/* SLOB */
	PG_slob_free = PG_private,
@@ -329,6 +331,8 @@ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
 PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
 PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
+PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
+	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
 
 PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
@@ -1567,8 +1567,10 @@ extern bool pcie_ports_native;
 
 #ifdef CONFIG_PCIEASPM
 bool pcie_aspm_support_enabled(void);
+bool pcie_aspm_enabled(struct pci_dev *pdev);
 #else
 static inline bool pcie_aspm_support_enabled(void) { return false; }
+static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
 #endif
 
 #ifdef CONFIG_PCIEAER
@@ -1107,6 +1107,7 @@ int genphy_c45_an_disable_aneg(struct phy_device *phydev);
 int genphy_c45_read_mdix(struct phy_device *phydev);
 int genphy_c45_pma_read_abilities(struct phy_device *phydev);
 int genphy_c45_read_status(struct phy_device *phydev);
+int genphy_c45_config_aneg(struct phy_device *phydev);
 
 /* The gen10g_* functions are the old Clause 45 stub */
 int gen10g_config_aneg(struct phy_device *phydev);
@@ -1092,7 +1092,15 @@ struct task_struct {
	u64				last_sum_exec_runtime;
	struct callback_head		numa_work;
 
-	struct numa_group		*numa_group;
+	/*
+	 * This pointer is only modified for current in syscall and
+	 * pagefault context (and for tasks being destroyed), so it can be read
+	 * from any of the following contexts:
+	 *  - RCU read-side critical section
+	 *  - current->numa_group from everywhere
+	 *  - task's runqueue locked, task not running
+	 */
+	struct numa_group __rcu		*numa_group;
 
	/*
	 * numa_faults is an array split into four regions:
@@ -19,7 +19,7 @@
 extern void task_numa_fault(int last_node, int node, int pages, int flags);
 extern pid_t task_numa_group_id(struct task_struct *p);
 extern void set_numabalancing_state(bool enabled);
-extern void task_numa_free(struct task_struct *p);
+extern void task_numa_free(struct task_struct *p, bool final);
 extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
					int src_nid, int dst_cpu);
 #else
@@ -34,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
 static inline void set_numabalancing_state(bool enabled)
 {
 }
-static inline void task_numa_free(struct task_struct *p)
+static inline void task_numa_free(struct task_struct *p, bool final)
 {
 }
 static inline bool should_numa_migrate_memory(struct task_struct *p,
@@ -282,6 +282,9 @@ extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping);
 extern void exit_signals(struct task_struct *tsk);
 extern void kernel_sigaction(int, __sighandler_t);
 
+#define SIG_KTHREAD ((__force __sighandler_t)2)
+#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3)
+
 static inline void allow_signal(int sig)
 {
	/*
@@ -289,7 +292,17 @@ static inline void allow_signal(int sig)
	 * know it'll be handled, so that they don't get converted to
	 * SIGKILL or just silently dropped.
	 */
-	kernel_sigaction(sig, (__force __sighandler_t)2);
+	kernel_sigaction(sig, SIG_KTHREAD);
+}
+
+static inline void allow_kernel_signal(int sig)
+{
+	/*
+	 * Kernel threads handle their own signals. Let the signal code
+	 * know signals sent by the kernel will be handled, so that they
+	 * don't get silently dropped.
+	 */
+	kernel_sigaction(sig, SIG_KTHREAD_KERNEL);
 }
 
 static inline void disallow_signal(int sig)
@@ -1374,6 +1374,14 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
	to->l4_hash = from->l4_hash;
 };
 
+static inline void skb_copy_decrypted(struct sk_buff *to,
+				      const struct sk_buff *from)
+{
+#ifdef CONFIG_TLS_DEVICE
+	to->decrypted = from->decrypted;
+#endif
+}
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
@@ -354,7 +354,13 @@ static inline void sk_psock_restore_proto(struct sock *sk,
	sk->sk_write_space = psock->saved_write_space;
 
	if (psock->sk_proto) {
-		sk->sk_prot = psock->sk_proto;
+		struct inet_connection_sock *icsk = inet_csk(sk);
+		bool has_ulp = !!icsk->icsk_ulp_data;
+
+		if (has_ulp)
+			tcp_update_ulp(sk, psock->sk_proto);
+		else
+			sk->sk_prot = psock->sk_proto;
		psock->sk_proto = NULL;
	}
 }
@@ -292,6 +292,9 @@ struct ucred {
 #define MSG_BATCH	0x40000 /* sendmmsg(): more messages coming */
 #define MSG_EOF         MSG_FIN
 #define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
+#define MSG_SENDPAGE_DECRYPTED	0x100000 /* sendpage() internal : page may carry
+					  * plain text and require encryption
+					  */
 
 #define MSG_ZEROCOPY	0x4000000	/* Use user data in kernel path */
 #define MSG_FASTOPEN	0x20000000	/* Send data in TCP SYN */
@@ -98,7 +98,6 @@ typedef void (*rpc_action)(struct rpc_task *);
 
 struct rpc_call_ops {
	void (*rpc_call_prepare)(struct rpc_task *, void *);
-	void (*rpc_call_prepare_transmit)(struct rpc_task *, void *);
	void (*rpc_call_done)(struct rpc_task *, void *);
	void (*rpc_count_stats)(struct rpc_task *, void *);
	void (*rpc_release)(void *);
@@ -57,6 +57,7 @@ struct tk_read_base {
  * @cs_was_changed_seq:	The sequence number of clocksource change events
  * @next_leap_ktime:	CLOCK_MONOTONIC time value of a pending leap-second
  * @raw_sec:		CLOCK_MONOTONIC_RAW time in seconds
+ * @monotonic_to_boot:	CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
  * @cycle_interval:	Number of clock cycles in one NTP interval
  * @xtime_interval:	Number of clock shifted nano seconds in one NTP
  *			interval.
@@ -84,6 +85,9 @@ struct tk_read_base {
  *
  * wall_to_monotonic is no longer the boot time, getboottime must be
  * used instead.
+ *
+ * @monotonic_to_boottime is a timespec64 representation of @offs_boot to
+ * accelerate the VDSO update for CLOCK_BOOTTIME.
  */
 struct timekeeper {
	struct tk_read_base	tkr_mono;
@@ -99,6 +103,7 @@ struct timekeeper {
	u8			cs_was_changed_seq;
	ktime_t			next_leap_ktime;
	u64			raw_sec;
+	struct timespec64	monotonic_to_boot;
 
	/* The following members are for timekeeping internal use */
	u64			cycle_interval;
@@ -548,6 +548,7 @@ extern int trace_event_get_offsets(struct trace_event_call *call);
 
 #define is_signed_type(type)	(((type)(-1)) < (type)1)
 
+int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
 int trace_set_clr_event(const char *system, const char *event, int set);
 
 /*
@@ -1457,7 +1457,7 @@ typedef void (*usb_complete_t)(struct urb *);
  * field rather than determining a dma address themselves.
  *
  * Note that transfer_buffer must still be set if the controller
- * does not support DMA (as indicated by bus.uses_dma) and when talking
+ * does not support DMA (as indicated by hcd_uses_dma()) and when talking
  * to root hub. If you have to trasfer between highmem zone and the device
  * on such controller, create a bounce buffer or bail out with an error.
 * If transfer_buffer cannot be set (is in highmem) and the controller is DMA
@@ -422,6 +422,9 @@ static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd,
	return hcd->high_prio_bh.completing_ep == ep;
 }
 
+#define hcd_uses_dma(hcd) \
+	(IS_ENABLED(CONFIG_HAS_DMA) && (hcd)->self.uses_dma)
+
 extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
 extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
		int status);
@@ -126,6 +126,19 @@ static inline int waitqueue_active(struct wait_queue_head *wq_head)
	return !list_empty(&wq_head->head);
 }
 
+/**
+ * wq_has_single_sleeper - check if there is only one sleeper
+ * @wq_head: wait queue head
+ *
+ * Returns true of wq_head has only one sleeper on the list.
+ *
+ * Please refer to the comment for waitqueue_active.
+ */
+static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
+{
+	return list_is_singular(&wq_head->head);
+}
+
 /**
  * wq_has_sleeper - check if there are any waiting processes
  * @wq_head: wait queue head
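The new wq_has_single_sleeper() helper is built on list_is_singular(): a circular doubly-linked list holds exactly one entry when it is non-empty and the head's next and prev point at the same node. A userspace re-implementation of that predicate with assumed names (not kernel source) shows the three cases:

/* Minimal sketch of list_is_singular() semantics. */
#include <stdbool.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static bool list_empty(const struct list_head *head)
{
	return head->next == head;
}

static bool list_is_singular(const struct list_head *head)
{
	return !list_empty(head) && (head->next == head->prev);
}

int main(void)
{
	struct list_head head, a, b;

	head.next = head.prev = &head;		/* empty wait queue */
	printf("empty: singular=%d\n", list_is_singular(&head));

	head.next = head.prev = &a;		/* one sleeper */
	a.next = a.prev = &head;
	printf("one:   singular=%d\n", list_is_singular(&head));

	head.next = &a; a.prev = &head;		/* two sleepers */
	a.next = &b;    b.prev = &a;
	b.next = &head; head.prev = &b;
	printf("two:   singular=%d\n", list_is_singular(&head));
	return 0;
}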