Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Just minor overlapping changes in the conflicts here.

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2019-08-06 18:44:57 -07:00
1012 changed files with 9640 additions and 7752 deletions


@@ -181,6 +181,7 @@ struct blkcg_policy {
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
struct request_queue *q, bool update_hint);


@@ -311,6 +311,7 @@ enum req_flag_bits {
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
__REQ_NOWAIT, /* Don't wait if request will block */
__REQ_NOWAIT_INLINE, /* Return would-block error inline */
/*
* When a shared kthread needs to issue a bio for a cgroup, doing
* so synchronously can lead to priority inversions as the kthread
@@ -345,6 +346,7 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE)
#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
@@ -418,12 +420,13 @@ static inline int op_stat_group(unsigned int op)
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
#define BLK_QC_T_EAGAIN -2U
#define BLK_QC_T_SHIFT 16
#define BLK_QC_T_INTERNAL (1U << 31)
static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
return cookie != BLK_QC_T_NONE;
return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
}
static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
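A minimal sketch of how a submitter could combine the new flag and cookie value; the bio setup and error handling below are illustrative assumptions, not code from this merge.

	/* Ask the block layer not to block, and to report the would-block
	 * condition inline through the returned cookie. */
	blk_qc_t cookie;

	bio->bi_opf |= REQ_NOWAIT | REQ_NOWAIT_INLINE;
	cookie = submit_bio(bio);
	if (cookie == BLK_QC_T_EAGAIN)
		return -EAGAIN;		/* would have blocked; caller may retry */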


@@ -359,6 +359,7 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
/**
* devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
* @dev: device for clock "consumer"
* @num_clks: the number of clk_bulk_data
* @clks: pointer to the clk_bulk_data table of consumer
*
* Behaves the same as devm_clk_bulk_get() except where there is no clock
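A hedged usage sketch for the documented helper; the clock names and error handling are invented for illustration.

	struct clk_bulk_data clks[] = {
		{ .id = "bus" },	/* hypothetical clock names */
		{ .id = "ref" },
	};
	int ret;

	/* Optional clocks: a missing clock leaves clks[i].clk == NULL
	 * instead of failing the probe. */
	ret = devm_clk_bulk_get_optional(dev, ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;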


@@ -145,7 +145,11 @@ struct cred {
struct user_struct *user; /* real user ID subscription */
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
struct group_info *group_info; /* supplementary groups for euid/fsgid */
struct rcu_head rcu; /* RCU deletion hook */
/* RCU deletion */
union {
int non_rcu; /* Can we skip RCU deletion? */
struct rcu_head rcu; /* RCU deletion hook */
};
} __randomize_layout;
extern void __put_cred(struct cred *);
@@ -246,6 +250,7 @@ static inline const struct cred *get_cred(const struct cred *cred)
if (!cred)
return cred;
validate_creds(cred);
nonconst_cred->non_rcu = 0;
return get_new_cred(nonconst_cred);
}
@@ -257,6 +262,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
if (!atomic_inc_not_zero(&nonconst_cred->usage))
return NULL;
validate_creds(cred);
nonconst_cred->non_rcu = 0;
return cred;
}
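For orientation, a simplified sketch of the release side this union enables (validation and accounting omitted; treat it as the idea rather than the exact code): a credential that never became visible to RCU readers can skip the grace period.

void __put_cred(struct cred *cred)
{
	if (cred->non_rcu)
		put_cred_rcu(&cred->rcu);		/* free immediately */
	else
		call_rcu(&cred->rcu, put_cred_rcu);	/* defer past readers */
}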


@@ -915,6 +915,8 @@ struct dev_links_info {
* This identifies the device type and carries type-specific
* information.
* @mutex: Mutex to synchronize calls to its driver.
* @lockdep_mutex: An optional debug lock that a subsystem can use as a
* peer lock to gain localized lockdep coverage of the device_lock.
* @bus: Type of bus device is on.
* @driver: Which driver has allocated this
* @platform_data: Platform data specific to the device.
@@ -998,6 +1000,9 @@ struct device {
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set_drvdata/dev_get_drvdata */
#ifdef CONFIG_PROVE_LOCKING
struct mutex lockdep_mutex;
#endif
struct mutex mutex; /* mutex to synchronize calls to
* its driver.
*/
@@ -1383,6 +1388,7 @@ extern int (*platform_notify_remove)(struct device *dev);
*/
extern struct device *get_device(struct device *dev);
extern void put_device(struct device *dev);
extern bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
extern int devtmpfs_create_node(struct device *dev);
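A sketch of the "peer lock" pattern the lockdep_mutex kerneldoc above describes; the wrapper names are hypothetical and the pattern is assumed, not taken from this merge.

static void subsys_device_lock(struct device *dev)
{
	device_lock(dev);
#ifdef CONFIG_PROVE_LOCKING
	/* shadow the device lock so lockdep can model the ordering */
	mutex_lock(&dev->lockdep_mutex);
#endif
}

static void subsys_device_unlock(struct device *dev)
{
#ifdef CONFIG_PROVE_LOCKING
	mutex_unlock(&dev->lockdep_mutex);
#endif
	device_unlock(dev);
}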


@@ -272,62 +272,6 @@ dim_update_sample_with_comps(u16 event_ctr, u64 packets, u64 bytes, u64 comps,
/* Net DIM */
/*
* Net DIM profiles:
* There are different set of profiles for each CQ period mode.
* There are different set of profiles for RX/TX CQs.
* Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
*/
#define NET_DIM_PARAMS_NUM_PROFILES 5
#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
#define NET_DIM_DEF_PROFILE_CQE 1
#define NET_DIM_DEF_PROFILE_EQE 1
#define NET_DIM_RX_EQE_PROFILES { \
{1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
}
#define NET_DIM_RX_CQE_PROFILES { \
{2, 256}, \
{8, 128}, \
{16, 64}, \
{32, 64}, \
{64, 64} \
}
#define NET_DIM_TX_EQE_PROFILES { \
{1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
{8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
{32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
{64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
{128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
}
#define NET_DIM_TX_CQE_PROFILES { \
{5, 128}, \
{8, 64}, \
{16, 32}, \
{32, 32}, \
{64, 32} \
}
static const struct dim_cq_moder
rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
NET_DIM_RX_EQE_PROFILES,
NET_DIM_RX_CQE_PROFILES,
};
static const struct dim_cq_moder
tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
NET_DIM_TX_EQE_PROFILES,
NET_DIM_TX_CQE_PROFILES,
};
/**
* net_dim_get_rx_moderation - provide a CQ moderation object for the given RX profile
* @cq_period_mode: CQ period mode


@@ -689,8 +689,8 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
*/
static inline bool dma_addressing_limited(struct device *dev)
{
return min_not_zero(*dev->dma_mask, dev->bus_dma_mask) <
dma_get_required_mask(dev);
return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
dma_get_required_mask(dev);
}
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
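A small illustrative caller (the driver fields are assumptions): the helper now compares the effective DMA mask, via dma_get_mask(), against what the platform requires.

	if (dma_addressing_limited(&pdev->dev)) {
		/* device cannot reach all of memory; fall back to bouncing */
		host->use_bounce_buffers = true;
	}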


@@ -45,7 +45,6 @@ struct elevator_mq_ops {
struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
bool (*has_work)(struct blk_mq_hw_ctx *);
void (*completed_request)(struct request *, u64);
void (*started_request)(struct request *);
void (*requeue_request)(struct request *);
struct request *(*former_request)(struct request_queue *, struct request *);
struct request *(*next_request)(struct request_queue *, struct request *);


@@ -24,6 +24,7 @@
#include <net/sch_generic.h>
#include <asm/byteorder.h>
#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>
@@ -747,6 +748,18 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
return size <= size_default && (size & (size - 1)) == 0;
}
static inline u8
bpf_ctx_narrow_load_shift(u32 off, u32 size, u32 size_default)
{
u8 load_off = off & (size_default - 1);
#ifdef __LITTLE_ENDIAN
return load_off * 8;
#else
return (size_default - (load_off + size)) * 8;
#endif
}
#define bpf_ctx_wide_access_ok(off, size, type, field) \
(size == sizeof(__u64) && \
off >= offsetof(type, field) && \
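A worked example of the new shift helper, with illustrative numbers, may make the endian handling easier to follow:

/* Narrow 2-byte load at byte offset 10 of an 8-byte context field
 * (size_default = 8):
 *	load_off = 10 & (8 - 1) = 2
 *	little-endian: shift = 2 * 8             = 16 bits
 *	big-endian:    shift = (8 - (2 + 2)) * 8 = 32 bits
 * The verifier emits the full 8-byte load and shifts the result right by
 * bpf_ctx_narrow_load_shift(), leaving the requested bytes in the low bits. */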


@@ -2598,6 +2598,12 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
void *holder);
extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
void *holder);
extern struct block_device *bd_start_claiming(struct block_device *bdev,
void *holder);
extern void bd_finish_claiming(struct block_device *bdev,
struct block_device *whole, void *holder);
extern void bd_abort_claiming(struct block_device *bdev,
struct block_device *whole, void *holder);
extern void blkdev_put(struct block_device *bdev, fmode_t mode);
extern int __blkdev_reread_part(struct block_device *bdev);
extern int blkdev_reread_part(struct block_device *bdev);
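A hedged sketch of how the newly exported claiming trio pairs up; the setup step and control flow are invented for illustration.

	struct block_device *whole;

	whole = bd_start_claiming(bdev, holder);
	if (IS_ERR(whole))
		return PTR_ERR(whole);

	if (prepare_bdev(bdev) < 0)			/* hypothetical setup step */
		bd_abort_claiming(bdev, whole, holder);	/* undo the pending claim */
	else
		bd_finish_claiming(bdev, whole, holder);	/* commit holder ownership */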


@@ -247,7 +247,7 @@ static inline void gpiod_put(struct gpio_desc *desc)
might_sleep();
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
}
static inline void devm_gpiod_unhinge(struct device *dev,
@@ -256,7 +256,7 @@ static inline void devm_gpiod_unhinge(struct device *dev,
might_sleep();
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
}
static inline void gpiod_put_array(struct gpio_descs *descs)
@@ -264,7 +264,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
might_sleep();
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(descs);
}
static inline struct gpio_desc *__must_check
@@ -317,7 +317,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
might_sleep();
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
}
static inline void devm_gpiod_put_array(struct device *dev,
@@ -326,32 +326,32 @@ static inline void devm_gpiod_put_array(struct device *dev,
might_sleep();
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(descs);
}
static inline int gpiod_get_direction(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_input(struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return -ENOSYS;
}
@@ -359,7 +359,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
static inline int gpiod_get_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return 0;
}
static inline int gpiod_get_array_value(unsigned int array_size,
@@ -368,13 +368,13 @@ static inline int gpiod_get_array_value(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
}
static inline int gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -382,13 +382,13 @@ static inline int gpiod_set_array_value(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc_array);
return 0;
}
static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return 0;
}
static inline int gpiod_get_raw_array_value(unsigned int array_size,
@@ -397,13 +397,13 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
}
static inline int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -411,14 +411,14 @@ static inline int gpiod_set_raw_array_value(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc_array);
return 0;
}
static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return 0;
}
static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
@@ -427,13 +427,13 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
}
static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -441,13 +441,13 @@ static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc_array);
return 0;
}
static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return 0;
}
static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
@@ -456,14 +456,14 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
}
static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -471,41 +471,41 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc_array);
return 0;
}
static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return 0;
}
static inline int gpiod_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return 0;
}
static inline int gpiod_to_irq(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return -EINVAL;
}
@@ -513,7 +513,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
const char *name)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return -EINVAL;
}
@@ -525,7 +525,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
static inline int desc_to_gpio(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
WARN_ON(desc);
return -EINVAL;
}
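The stubs above now warn only when handed a real descriptor. A sketch of the consumer pattern this keeps quiet (device and line name assumed): with GPIOLIB disabled, optional requests return NULL and the accessors must stay silent.

	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value(reset, 1);	/* silently a no-op when reset == NULL */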


@@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
*/
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
/* This is a temporary helper to avoid merge conflict between trees. */
static inline bool hmm_vma_range_done(struct hmm_range *range)
{
bool ret = hmm_range_valid(range);
hmm_range_unregister(range);
return ret;
}
/* This is a temporary helper to avoid merge conflict between trees. */
static inline int hmm_vma_fault(struct hmm_mirror *mirror,
struct hmm_range *range, bool block)
{
long ret;
/*
* With the old API the driver must set each individual entries with
* the requested flags (valid, write, ...). So here we set the mask to
* keep intact the entries provided by the driver and zero out the
* default_flags.
*/
range->default_flags = 0;
range->pfn_flags_mask = -1UL;
ret = hmm_range_register(range, mirror,
range->start, range->end,
PAGE_SHIFT);
if (ret)
return (int)ret;
if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
/*
* The mmap_sem was taken by driver we release it here and
* returns -EAGAIN which correspond to mmap_sem have been
* drop in the old API.
*/
up_read(&range->vma->vm_mm->mmap_sem);
return -EAGAIN;
}
ret = hmm_range_fault(range, block);
if (ret <= 0) {
if (ret == -EBUSY || !ret) {
/* Same as above, drop mmap_sem to match old API. */
up_read(&range->vma->vm_mm->mmap_sem);
ret = -EBUSY;
} else if (ret == -EAGAIN)
ret = -EBUSY;
hmm_range_unregister(range);
return ret;
}
return 0;
}
/* Below are for HMM internal use only! Not to be used by device driver! */
static inline void hmm_mm_init(struct mm_struct *mm)
{


@@ -80,6 +80,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
extern void unregister_pppox_proto(int proto_num);
extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t)
/* PPPoX socket states */
enum {
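A sketch of the shape such a compat helper typically has (the body is assumed, not quoted from this merge): translate the 32-bit encoding of PPPOEIOCSFWD and reuse the native ioctl path for everything else.

int pppox_compat_ioctl(struct socket *sock, unsigned int cmd,
		       unsigned long arg)
{
	if (cmd == PPPOEIOCSFWD32)
		cmd = PPPOEIOCSFWD;

	return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}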


@@ -41,11 +41,11 @@ struct rmnet_map_ul_csum_header {
__be16 csum_start_offset;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u16 csum_insert_offset:14;
u16 udp_ip4_ind:1;
u16 udp_ind:1;
u16 csum_enabled:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
u16 csum_enabled:1;
u16 udp_ip4_ind:1;
u16 udp_ind:1;
u16 csum_insert_offset:14;
#else
#error "Please fix <asm/byteorder.h>"


@@ -155,6 +155,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn);
bool has_iova_flush_queue(struct iova_domain *iovad);
int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -235,6 +236,11 @@ static inline void init_iova_domain(struct iova_domain *iovad,
{
}
static inline bool has_iova_flush_queue(struct iova_domain *iovad)
{
return false;
}
static inline int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb,
iova_entry_dtor entry_dtor)


@@ -220,6 +220,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,


@@ -5998,10 +5998,12 @@ struct mlx5_ifc_modify_cq_in_bits {
struct mlx5_ifc_cqc_bits cq_context;
u8 reserved_at_280[0x40];
u8 reserved_at_280[0x60];
u8 cq_umem_valid[0x1];
u8 reserved_at_2c1[0x5bf];
u8 reserved_at_2e1[0x1f];
u8 reserved_at_300[0x580];
u8 pas[0][0x40];
};


@@ -814,6 +814,7 @@ struct tee_client_device_id {
/**
* struct wmi_device_id - WMI device identifier
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
* @context: pointer to driver specific data
*/
struct wmi_device_id {
const char guid_string[UUID_STRING_LEN+1];
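A hedged example of a match table using the new @context member; the GUID is the placeholder from the kerneldoc above and the driver data type is hypothetical.

static const struct example_wmi_data example_data = { /* driver specific */ };

static const struct wmi_device_id example_wmi_id_table[] = {
	{
		.guid_string = "fa50ff2b-f2e8-45de-83fa-65417f2f49ba",
		.context = &example_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(wmi, example_wmi_id_table);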


@@ -1164,7 +1164,7 @@ static inline int of_property_read_string_index(const struct device_node *np,
}
/**
* of_property_read_bool - Findfrom a property
* of_property_read_bool - Find a property
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
*


@@ -32,6 +32,7 @@
#endif /* CONFIG_SPARSEMEM */
#ifndef BUILD_VDSO32_64
/*
* page->flags layout:
*
@@ -76,20 +77,22 @@
#define LAST_CPUPID_SHIFT 0
#endif
#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_TAG_WIDTH 8
#else
#define KASAN_TAG_WIDTH 0
#endif
#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
<= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH 0
#endif
#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_TAG_WIDTH 8
#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
> BITS_PER_LONG - NR_PAGEFLAGS
#error "KASAN: not enough bits in page flags for tag"
#endif
#else
#define KASAN_TAG_WIDTH 0
#error "Not enough bits in page flags"
#endif
/*
@@ -104,4 +107,5 @@
#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif
#endif
#endif /* _LINUX_PAGE_FLAGS_LAYOUT */


@@ -152,6 +152,8 @@ enum pageflags {
PG_savepinned = PG_dirty,
/* Has a grant mapping of another (foreign) domain's page. */
PG_foreign = PG_owner_priv_1,
/* Remapped by swiotlb-xen. */
PG_xen_remapped = PG_owner_priv_1,
/* SLOB */
PG_slob_free = PG_private,
@@ -329,6 +331,8 @@ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
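The PAGEFLAG/TESTCLEARFLAG lines generate PageXenRemapped(), SetPageXenRemapped() and TestClearPageXenRemapped(). A sketch of the intended swiotlb-xen usage (the restore helper is hypothetical): mark a page whose machine mapping was rewritten, and undo it exactly once when the page is freed.

	SetPageXenRemapped(page);		/* after remapping the page for DMA */

	if (TestClearPageXenRemapped(page))	/* on free: runs at most once */
		xen_restore_original_mapping(page);	/* hypothetical helper */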


@@ -1092,7 +1092,15 @@ struct task_struct {
u64 last_sum_exec_runtime;
struct callback_head numa_work;
struct numa_group *numa_group;
/*
* This pointer is only modified for current in syscall and
* pagefault context (and for tasks being destroyed), so it can be read
* from any of the following contexts:
* - RCU read-side critical section
* - current->numa_group from everywhere
* - task's runqueue locked, task not running
*/
struct numa_group __rcu *numa_group;
/*
* numa_faults is an array split into four regions:
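A minimal reader-side sketch that follows the locking comment above (the dereferenced field is assumed, purely for illustration):

	struct numa_group *ng;
	pid_t gid = 0;

	rcu_read_lock();
	ng = rcu_dereference(p->numa_group);
	if (ng)
		gid = ng->gid;		/* assumed field name, illustrative only */
	rcu_read_unlock();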


@@ -19,7 +19,7 @@
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p);
extern void task_numa_free(struct task_struct *p, bool final);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
int src_nid, int dst_cpu);
#else
@@ -34,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
static inline void task_numa_free(struct task_struct *p, bool final)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,


@@ -354,7 +354,13 @@ static inline void sk_psock_restore_proto(struct sock *sk,
sk->sk_write_space = psock->saved_write_space;
if (psock->sk_proto) {
sk->sk_prot = psock->sk_proto;
struct inet_connection_sock *icsk = inet_csk(sk);
bool has_ulp = !!icsk->icsk_ulp_data;
if (has_ulp)
tcp_update_ulp(sk, psock->sk_proto);
else
sk->sk_prot = psock->sk_proto;
psock->sk_proto = NULL;
}
}


@@ -126,6 +126,19 @@ static inline int waitqueue_active(struct wait_queue_head *wq_head)
return !list_empty(&wq_head->head);
}
/**
* wq_has_single_sleeper - check if there is only one sleeper
* @wq_head: wait queue head
*
* Returns true if wq_head has only one sleeper on the list.
*
* Please refer to the comment for waitqueue_active.
*/
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
return list_is_singular(&wq_head->head);
}
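A small assumed caller, just to show where the new helper fits; the wakeup policy and names here are invented.

	/* e.g. skip the heavier wake-all path when only one task is parked */
	if (wq_has_single_sleeper(&ctx->wait))
		wake_up(&ctx->wait);
	else
		wake_up_all(&ctx->wait);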
/**
* wq_has_sleeper - check if there are any waiting processes
* @wq_head: wait queue head