Backmerge tag 'v4.14-rc7' into drm-next
Linux 4.14-rc7

Requested by Ben Skeggs for nouveau to avoid major conflicts, and things
were getting a bit conflicty already, especially around amdgpu reverts.
@@ -131,7 +131,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
 				 int executable_stack);
 extern int transfer_args_to_stack(struct linux_binprm *bprm,
 				  unsigned long *sp_location);
-extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
+extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm);
 extern int copy_strings_kernel(int argc, const char *const *argv,
 			       struct linux_binprm *bprm);
 extern int prepare_bprm_creds(struct linux_binprm *bprm);
@@ -92,7 +92,7 @@
 /**
  * FIELD_GET() - extract a bitfield element
  * @_mask: shifted mask defining the field's length and position
- * @_reg:  32bit value of entire bitfield
+ * @_reg:  value of entire bitfield
  *
  * FIELD_GET() extracts the field specified by @_mask from the
  * bitfield passed in as @_reg by masking and shifting it down.
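
Since this hunk only touches FIELD_GET()'s kernel-doc, a quick illustration of what the macro family does may help. This is a hedged userspace sketch of the same mask-and-shift idea, not the kernel implementation; MY_FIELD_GET/MY_FIELD_PREP are invented names and the mask is assumed to be one contiguous run of set bits:

	#include <stdio.h>
	#include <stdint.h>

	/* the lowest set bit of the mask is the field's scale factor */
	#define MY_FIELD_PREP(mask, val) (((val) * ((mask) & -(mask))) & (mask))
	#define MY_FIELD_GET(mask, reg)  (((reg) & (mask)) / ((mask) & -(mask)))

	int main(void)
	{
		const uint32_t speed = 0x0000ff00;	/* field in bits 15:8 */
		uint32_t reg = MY_FIELD_PREP(speed, 0x42);

		printf("reg   = 0x%08x\n", reg);			/* 0x00004200 */
		printf("field = 0x%02x\n", MY_FIELD_GET(speed, reg));	/* 0x42 */
		return 0;
	}
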
@@ -368,6 +368,11 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
 {
 }
 
+static inline int bpf_obj_get_user(const char __user *pathname)
+{
+	return -EOPNOTSUPP;
+}
+
 static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
 						       u32 key)
 {
@@ -232,6 +232,7 @@ int generic_write_end(struct file *, struct address_space *,
 			loff_t, unsigned, unsigned,
 			struct page *, void *);
 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
+void clean_page_buffers(struct page *page);
 int cont_write_begin(struct file *, struct address_space *, loff_t,
 			unsigned, unsigned, struct page **, void **,
 			get_block_t *, loff_t *);
@@ -307,8 +307,6 @@ struct driver_attribute {
 			 size_t count);
 };
 
-#define DRIVER_ATTR(_name, _mode, _show, _store) \
-	struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
 #define DRIVER_ATTR_RW(_name) \
 	struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
 #define DRIVER_ATTR_RO(_name) \
@@ -728,7 +728,7 @@ void xdp_do_flush_map(void);
 void bpf_warn_invalid_xdp_action(u32 act);
 void bpf_warn_invalid_xdp_redirect(u32 ifindex);
 
-struct sock *do_sk_redirect_map(void);
+struct sock *do_sk_redirect_map(struct sk_buff *skb);
 
 #ifdef CONFIG_BPF_JIT
 extern int bpf_jit_enable;
@@ -403,7 +403,7 @@ struct address_space {
 	unsigned long		flags;		/* error bits */
 	spinlock_t		private_lock;	/* for use by the address_space */
 	gfp_t			gfp_mask;	/* implicit gfp mask for allocations */
-	struct list_head	private_list;	/* ditto */
+	struct list_head	private_list;	/* for use by the address_space */
 	void			*private_data;	/* ditto */
 	errseq_t		wb_err;
 } __attribute__((aligned(sizeof(long)))) __randomize_layout;
@@ -1403,7 +1403,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
 				const int *srv_version, int srv_vercnt,
 				int *nego_fw_version, int *nego_srv_version);
 
-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
+void hv_process_channel_removal(u32 relid);
 
 void vmbus_setevent(struct vmbus_channel *channel);
 /*
@@ -73,8 +73,8 @@ void tap_del_queues(struct tap_dev *tap);
 int tap_get_minor(dev_t major, struct tap_dev *tap);
 void tap_free_minor(dev_t major, struct tap_dev *tap);
 int tap_queue_resize(struct tap_dev *tap);
-int tap_create_cdev(struct cdev *tap_cdev,
-		    dev_t *tap_major, const char *device_name);
+int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+		    const char *device_name, struct module *module);
 void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
 
 #endif /*_LINUX_IF_TAP_H_*/
@@ -111,6 +111,9 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
 int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
 	unsigned int size, unsigned int *val);
 
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
+	unsigned int reset_length);
+
 int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
 	const struct iio_chan_spec *chan, int *val);
 int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
@@ -234,6 +234,10 @@ struct input_dev {
 #error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match"
 #endif
 
+#if INPUT_PROP_MAX != INPUT_DEVICE_ID_PROP_MAX
+#error "INPUT_PROP_MAX and INPUT_DEVICE_ID_PROP_MAX do not match"
+#endif
+
 #define INPUT_DEVICE_ID_MATCH_DEVICE \
 	(INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT)
 #define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
@@ -469,6 +473,9 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke);
 int input_set_keycode(struct input_dev *dev,
 		      const struct input_keymap_entry *ke);
 
+bool input_match_device_id(const struct input_dev *dev,
+			   const struct input_device_id *id);
+
 void input_enable_softrepeat(struct input_dev *dev, int delay, int period);
 
 extern struct class input_class;
@@ -1009,7 +1009,7 @@ void irq_gc_mask_clr_bit(struct irq_data *d);
 void irq_gc_unmask_enable_reg(struct irq_data *d);
 void irq_gc_ack_set_bit(struct irq_data *d);
 void irq_gc_ack_clr_bit(struct irq_data *d);
-void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
+void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
 void irq_gc_eoi(struct irq_data *d);
 int irq_gc_set_wake(struct irq_data *d, unsigned int on);
 
@@ -372,6 +372,8 @@
 #define GITS_BASER_ENTRY_SIZE_SHIFT		(48)
 #define GITS_BASER_ENTRY_SIZE(r)	((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
 #define GITS_BASER_ENTRY_SIZE_MASK	GENMASK_ULL(52, 48)
+#define GITS_BASER_PHYS_52_to_48(phys)					\
+	(((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
 #define GITS_BASER_SHAREABILITY_SHIFT	(10)
 #define GITS_BASER_InnerShareable					\
 	GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
@@ -44,6 +44,12 @@
 
 #define STACK_MAGIC	0xdeadbeef
 
+/**
+ * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
+ * @x: value to repeat
+ *
+ * NOTE: @x is not checked for > 0xff; larger values produce odd results.
+ */
 #define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))
 
 /* @a is a power of 2 value */
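
Worked expansion of the documented behaviour (my arithmetic, assuming a 64-bit unsigned long): ~0ul / 0xff is 0x0101010101010101, so

	REPEAT_BYTE(0x7f)  == 0x0101010101010101 * 0x7f  == 0x7f7f7f7f7f7f7f7f
	REPEAT_BYTE(0x100) == 0x0101010101010101 * 0x100 == 0x0101010101010100

the second line being exactly the "odd results" the NOTE warns about for values above 0xff.
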
@@ -57,6 +63,10 @@
 #define READ			0
 #define WRITE			1
 
+/**
+ * ARRAY_SIZE - get the number of elements in array @arr
+ * @arr: array to be sized
+ */
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 
 #define u64_to_user_ptr(x) ( \
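
The __must_be_array() term is what makes ARRAY_SIZE() reject plain pointers at compile time. A hedged userspace approximation of the same idea (GCC/clang builtins; MUST_BE_ARRAY and MY_ARRAY_SIZE are invented names, not the kernel's helpers):

	#include <stdio.h>

	/* evaluates to 0 for true arrays; for pointers the char[-1] type
	 * breaks the build */
	#define MUST_BE_ARRAY(a) \
		(sizeof(char[1 - 2 * __builtin_types_compatible_p(__typeof__(a), \
						__typeof__(&(a)[0]))]) - 1)
	#define MY_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + MUST_BE_ARRAY(arr))

	int main(void)
	{
		int primes[] = { 2, 3, 5, 7, 11 };

		printf("%zu\n", MY_ARRAY_SIZE(primes));	/* 5 */
		/* int *p = primes; MY_ARRAY_SIZE(p);  <- would fail to compile */
		return 0;
	}
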
@@ -76,7 +86,15 @@
 #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
 #define round_down(x, y) ((x) & ~__round_mask(x, y))
 
+/**
+ * FIELD_SIZEOF - get the size of a struct's field
+ * @t: the target struct
+ * @f: the target struct's field
+ *
+ * Return: the size of @f in the struct definition without having a
+ * declared instance of @t.
+ */
 #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
 
 #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
 
 #define DIV_ROUND_DOWN_ULL(ll, d) \
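
Hypothetical usage of FIELD_SIZEOF() (struct packet is invented for illustration); the null-pointer cast is only ever an operand of sizeof, so nothing is dereferenced at runtime:

	#include <stdio.h>

	#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))

	struct packet {
		unsigned char hdr[16];
		unsigned int  crc;
	};

	int main(void)
	{
		printf("%zu\n", FIELD_SIZEOF(struct packet, hdr));	/* 16 */
		printf("%zu\n", FIELD_SIZEOF(struct packet, crc));	/* typically 4 */
		return 0;
	}
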
@@ -107,7 +125,7 @@
 /*
  * Divide positive or negative dividend by positive or negative divisor
  * and round to closest integer. Result is undefined for negative
- * divisors if he dividend variable type is unsigned and for negative
+ * divisors if the dividend variable type is unsigned and for negative
  * dividends if the divisor variable type is unsigned.
  */
 #define DIV_ROUND_CLOSEST(x, divisor)(			\
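
Worked values for the documented rounding (my arithmetic, both operands signed — rounding is half away from zero):

	DIV_ROUND_CLOSEST(7, 2)  == 4	/*  3.5 ->  4 */
	DIV_ROUND_CLOSEST(6, 4)  == 2	/*  1.5 ->  2 */
	DIV_ROUND_CLOSEST(-7, 2) == -4	/* -3.5 -> -4 */

The undefined cases called out above are exactly the ones where an unsigned operand makes the negative-operand adjustment impossible.
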
@@ -247,13 +265,13 @@ extern int _cond_resched(void);
  * @ep_ro: right open interval endpoint
  *
  * Perform a "reciprocal multiplication" in order to "scale" a value into
- * range [0, ep_ro), where the upper interval endpoint is right-open.
+ * range [0, @ep_ro), where the upper interval endpoint is right-open.
  * This is useful, e.g. for accessing a index of an array containing
- * ep_ro elements, for example. Think of it as sort of modulus, only that
+ * @ep_ro elements, for example. Think of it as sort of modulus, only that
  * the result isn't that of modulo. ;) Note that if initial input is a
  * small value, then result will return 0.
  *
- * Return: a result based on val in interval [0, ep_ro).
+ * Return: a result based on @val in interval [0, @ep_ro).
  */
 static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
 {
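
The body lies outside this hunk; in mainline it computes ((u64)val * ep_ro) >> 32, so treat the following as a sketch under that assumption. The widening multiply maps the full 32-bit input range proportionally onto [0, ep_ro):

	reciprocal_scale(0x80000000, 10) == (0x80000000ULL * 10) >> 32 == 5
	reciprocal_scale(100, 10)        == 1000 >> 32                 == 0

which also shows the documented "small initial input returns 0" behaviour.
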
@@ -618,8 +636,8 @@ do {							\
  * trace_printk - printf formatting in the ftrace buffer
  * @fmt: the printf format for printing
  *
- * Note: __trace_printk is an internal function for trace_printk and
- *       the @ip is passed in via the trace_printk macro.
+ * Note: __trace_printk is an internal function for trace_printk() and
+ *       the @ip is passed in via the trace_printk() macro.
  *
  * This function allows a kernel developer to debug fast path sections
  * that printk is not appropriate for. By scattering in various
@@ -629,7 +647,7 @@ do {							\
  * This is intended as a debugging tool for the developer only.
  * Please refrain from leaving trace_printks scattered around in
  * your code. (Extra memory is used for special buffers that are
- * allocated when trace_printk() is used)
+ * allocated when trace_printk() is used.)
  *
  * A little optimization trick is done here. If there's only one
  * argument, there's no need to scan the string for printf formats.
@@ -681,7 +699,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
  *       the @ip is passed in via the trace_puts macro.
  *
  * This is similar to trace_printk() but is made for those really fast
- * paths that a developer wants the least amount of "Heisenbug" affects,
+ * paths that a developer wants the least amount of "Heisenbug" effects,
  * where the processing of the print format is still too much.
  *
  * This function allows a kernel developer to debug fast path sections
@@ -692,7 +710,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
  * This is intended as a debugging tool for the developer only.
  * Please refrain from leaving trace_puts scattered around in
  * your code. (Extra memory is used for special buffers that are
- * allocated when trace_puts() is used)
+ * allocated when trace_puts() is used.)
  *
  * Returns: 0 if nothing was written, positive # if string was.
  *          (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
@@ -771,6 +789,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 		t2 min2 = (y);			\
 		(void) (&min1 == &min2);	\
 		min1 < min2 ? min1 : min2; })
+
+/**
+ * min - return minimum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
 #define min(x, y)				\
 	__min(typeof(x), typeof(y),		\
 	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),	\
@@ -781,12 +805,31 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 		t2 max2 = (y);			\
 		(void) (&max1 == &max2);	\
 		max1 > max2 ? max1 : max2; })
+
+/**
+ * max - return maximum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
 #define max(x, y)				\
 	__max(typeof(x), typeof(y),		\
 	      __UNIQUE_ID(max1_), __UNIQUE_ID(max2_),	\
 	      x, y)
 
+/**
+ * min3 - return minimum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
 #define min3(x, y, z) min((typeof(x))min(x, y), z)
+
+/**
+ * max3 - return maximum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
 #define max3(x, y, z) max((typeof(x))max(x, y), z)
 
 /**
@@ -805,8 +848,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
  * @lo: lowest allowable value
  * @hi: highest allowable value
  *
- * This macro does strict typechecking of lo/hi to make sure they are of the
- * same type as val. See the unnecessary pointer comparisons.
+ * This macro does strict typechecking of @lo/@hi to make sure they are of the
+ * same type as @val. See the unnecessary pointer comparisons.
  */
 #define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
 
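
For readers tracing the "unnecessary pointer comparisons" remark: the check lives in the (void) (&min1 == &min2) line of __min() above. A hedged userspace sketch of the same trick (my_min is an invented name; GCC/clang statement expressions):

	#include <stdio.h>

	#define my_min(x, y) ({				\
		__typeof__(x) min1 = (x);		\
		__typeof__(y) min2 = (y);		\
		(void) (&min1 == &min2);	/* warns if x and y differ in type */ \
		min1 < min2 ? min1 : min2; })

	int main(void)
	{
		printf("%d\n", my_min(3, 7));	/* 3 */
		/* my_min(3, 7UL) draws a distinct-pointer-types warning */
		return 0;
	}

The __UNIQUE_ID(min1_)/__UNIQUE_ID(min2_) arguments in the patched macros exist so that nested invocations such as min(min(a, b), c) do not shadow these temporaries.
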
@@ -816,11 +859,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
  *
  * Or not use min/max/clamp at all, of course.
  */
+
+/**
+ * min_t - return minimum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
 #define min_t(type, x, y)			\
 	__min(type, type,			\
 	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),	\
 	      x, y)
 
+/**
+ * max_t - return maximum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
 #define max_t(type, x, y)			\
 	__max(type, type,			\
 	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),	\
@@ -834,7 +890,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
  * @hi: maximum allowable value
  *
  * This macro does no typechecking and uses temporary variables of type
- * 'type' to make all the comparisons.
+ * @type to make all the comparisons.
  */
 #define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
 
@@ -845,15 +901,17 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
  * @hi: maximum allowable value
  *
  * This macro does no typechecking and uses temporary variables of whatever
- * type the input argument 'val' is. This is useful when val is an unsigned
- * type and min and max are literals that will otherwise be assigned a signed
+ * type the input argument @val is. This is useful when @val is an unsigned
+ * type and @lo and @hi are literals that will otherwise be assigned a signed
  * integer type.
  */
 #define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
 
 
-/*
- * swap - swap value of @a and @b
+/**
+ * swap - swap values of @a and @b
+ * @a: first value
+ * @b: second value
  */
 #define swap(a, b) \
 	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
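
Worked illustration of the clamp_val() signedness point (values invented; this is a hedged re-derivation with ternaries rather than the kernel's min_t/max_t composition, so arguments may be evaluated more than once):

	#include <stdio.h>

	#define my_clamp_t(type, val, lo, hi)				\
		((type)(val) < (type)(lo) ? (type)(lo) :		\
		 (type)(val) > (type)(hi) ? (type)(hi) : (type)(val))
	#define my_clamp_val(val, lo, hi) my_clamp_t(__typeof__(val), val, lo, hi)

	int main(void)
	{
		unsigned char v = 200;

		/* the int literals are forced to v's unsigned char type */
		printf("%d\n", my_clamp_val(v, 10, 150));	/* 150 */
		printf("%d\n", my_clamp_val(v, 10, 250));	/* 200 */
		return 0;
	}
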
@@ -138,6 +138,11 @@ struct key_restriction {
 	struct key_type *keytype;
 };
 
+enum key_state {
+	KEY_IS_UNINSTANTIATED,
+	KEY_IS_POSITIVE,	/* Positively instantiated */
+};
+
 /*****************************************************************************/
 /*
  * authentication token / access credential / keyring
@@ -169,6 +174,7 @@ struct key {
 					 *   - may not match RCU dereferenced payload
 					 *   - payload should contain own length
 					 */
+	short			state;		/* Key state (+) or rejection error (-) */
 
 #ifdef KEY_DEBUGGING
 	unsigned		magic;
@@ -176,18 +182,16 @@ struct key {
 #endif
 
 	unsigned long		flags;		/* status flags (change with bitops) */
-#define KEY_FLAG_INSTANTIATED	0	/* set if key has been instantiated */
-#define KEY_FLAG_DEAD		1	/* set if key type has been deleted */
-#define KEY_FLAG_REVOKED	2	/* set if key had been revoked */
-#define KEY_FLAG_IN_QUOTA	3	/* set if key consumes quota */
-#define KEY_FLAG_USER_CONSTRUCT	4	/* set if key is being constructed in userspace */
-#define KEY_FLAG_NEGATIVE	5	/* set if key is negative */
-#define KEY_FLAG_ROOT_CAN_CLEAR	6	/* set if key can be cleared by root without permission */
-#define KEY_FLAG_INVALIDATED	7	/* set if key has been invalidated */
-#define KEY_FLAG_BUILTIN	8	/* set if key is built in to the kernel */
-#define KEY_FLAG_ROOT_CAN_INVAL	9	/* set if key can be invalidated by root without permission */
-#define KEY_FLAG_KEEP		10	/* set if key should not be removed */
-#define KEY_FLAG_UID_KEYRING	11	/* set if key is a user or user session keyring */
+#define KEY_FLAG_DEAD		0	/* set if key type has been deleted */
+#define KEY_FLAG_REVOKED	1	/* set if key had been revoked */
+#define KEY_FLAG_IN_QUOTA	2	/* set if key consumes quota */
+#define KEY_FLAG_USER_CONSTRUCT	3	/* set if key is being constructed in userspace */
+#define KEY_FLAG_ROOT_CAN_CLEAR	4	/* set if key can be cleared by root without permission */
+#define KEY_FLAG_INVALIDATED	5	/* set if key has been invalidated */
+#define KEY_FLAG_BUILTIN	6	/* set if key is built in to the kernel */
+#define KEY_FLAG_ROOT_CAN_INVAL	7	/* set if key can be invalidated by root without permission */
+#define KEY_FLAG_KEEP		8	/* set if key should not be removed */
+#define KEY_FLAG_UID_KEYRING	9	/* set if key is a user or user session keyring */
 
 	/* the key type and key description string
 	 * - the desc is used to match a key against search criteria
@@ -213,7 +217,6 @@ struct key {
 			struct list_head name_link;
 			struct assoc_array keys;
 		};
-		int reject_error;
 	};
 
 	/* This is set on a keyring to restrict the addition of a link to a key
@@ -353,17 +356,27 @@ extern void key_set_timeout(struct key *, unsigned);
 #define	KEY_NEED_SETATTR 0x20	/* Require permission to change attributes */
 #define	KEY_NEED_ALL	0x3f	/* All the above permissions */
 
+static inline short key_read_state(const struct key *key)
+{
+	/* Barrier versus mark_key_instantiated(). */
+	return smp_load_acquire(&key->state);
+}
+
 /**
- * key_is_instantiated - Determine if a key has been positively instantiated
+ * key_is_positive - Determine if a key has been positively instantiated
  * @key: The key to check.
  *
  * Return true if the specified key has been positively instantiated, false
 * otherwise.
 */
-static inline bool key_is_instantiated(const struct key *key)
+static inline bool key_is_positive(const struct key *key)
 {
-	return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
-		!test_bit(KEY_FLAG_NEGATIVE, &key->flags);
+	return key_read_state(key) == KEY_IS_POSITIVE;
 }
+
+static inline bool key_is_negative(const struct key *key)
+{
+	return key_read_state(key) < 0;
+}
 
 #define dereference_key_rcu(KEY)					\
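
The rework above replaces two racy flag tests with one read of a single state field: 0 means uninstantiated, KEY_IS_POSITIVE means instantiated, and any negative value is the cached rejection errno. A hedged sketch of how a caller might branch on it (check_found_key() is an invented name; key_read_state() and the enum come from the hunk):

	static int check_found_key(const struct key *key)
	{
		short state = key_read_state(key);	/* acquire-ordered, single read */

		if (state == KEY_IS_UNINSTANTIATED)
			return -EINPROGRESS;		/* construction still pending */
		if (state < 0)
			return state;			/* negative key: cached error, e.g. -ENOKEY */
		return 0;				/* KEY_IS_POSITIVE: payload valid */
	}
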
@@ -31,8 +31,8 @@ struct mbus_dram_target_info
 	struct mbus_dram_window {
 		u8	cs_index;
 		u8	mbus_attr;
-		u32	base;
-		u32	size;
+		u64	base;
+		u64	size;
 	} cs[4];
 };
 
@@ -980,7 +980,6 @@ enum mlx5_cap_type {
 	MLX5_CAP_RESERVED,
 	MLX5_CAP_VECTOR_CALC,
 	MLX5_CAP_QOS,
-	MLX5_CAP_FPGA,
 	/* NUM OF CAP Types */
 	MLX5_CAP_NUM
 };
@@ -1110,10 +1109,10 @@ enum mlx5_mcam_feature_groups {
 	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
 
 #define MLX5_CAP_FPGA(mdev, cap) \
-	MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+	MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
 
 #define MLX5_CAP64_FPGA(mdev, cap) \
-	MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
 
 enum {
 	MLX5_CMD_STAT_OK			= 0x0,
@@ -774,6 +774,7 @@ struct mlx5_core_dev {
 		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
 		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
 		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
 	} caps;
 	phys_addr_t		iseg_base;
 	struct mlx5_init_seg __iomem *iseg;
@@ -327,7 +327,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8         reserved_at_80[0x18];
 	u8         log_max_destination[0x8];
 
-	u8         reserved_at_a0[0x18];
+	u8         log_max_flow_counter[0x8];
+	u8         reserved_at_a8[0x10];
 	u8         log_max_flow[0x8];
 
 	u8         reserved_at_c0[0x40];
@@ -157,6 +157,8 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
 int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
 			    u8 prio, u8 *tc);
 int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+			     u8 tc, u8 *tc_group);
 int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
 int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
 				u8 tc, u8 *bw_pct);
@@ -240,7 +240,7 @@ extern unsigned int kobjsize(const void *objp);
 
 #if defined(CONFIG_X86_INTEL_MPX)
 /* MPX specific bounds table or bounds directory */
-# define VM_MPX		VM_HIGH_ARCH_BIT_4
+# define VM_MPX		VM_HIGH_ARCH_4
 #else
 # define VM_MPX		VM_NONE
 #endif
@@ -445,6 +445,9 @@ struct mm_struct {
 	unsigned long flags; /* Must use atomic bitops to access the bits */
 
 	struct core_state *core_state; /* coredumping support */
+#ifdef CONFIG_MEMBARRIER
+	atomic_t membarrier_state;
+#endif
 #ifdef CONFIG_AIO
 	spinlock_t			ioctx_lock;
 	struct kioctx_table __rcu	*ioctx_table;
@@ -316,7 +316,7 @@ struct mmc_host {
 #define MMC_CAP_UHS_SDR50	(1 << 18)	/* Host supports UHS SDR50 mode */
 #define MMC_CAP_UHS_SDR104	(1 << 19)	/* Host supports UHS SDR104 mode */
 #define MMC_CAP_UHS_DDR50	(1 << 20)	/* Host supports UHS DDR50 mode */
-#define MMC_CAP_NO_BOUNCE_BUFF	(1 << 21)	/* Disable bounce buffers on host */
+/* (1 << 21) is free for reuse */
 #define MMC_CAP_DRIVER_TYPE_A	(1 << 23)	/* Host supports Driver Type A */
 #define MMC_CAP_DRIVER_TYPE_C	(1 << 24)	/* Host supports Driver Type C */
 #define MMC_CAP_DRIVER_TYPE_D	(1 << 25)	/* Host supports Driver Type D */
@@ -400,6 +400,11 @@ extern void mmu_notifier_synchronize(void);
 
 #else /* CONFIG_MMU_NOTIFIER */
 
+static inline int mm_has_notifiers(struct mm_struct *mm)
+{
+	return 0;
+}
+
 static inline void mmu_notifier_release(struct mm_struct *mm)
 {
 }
@@ -1094,8 +1094,14 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 #error Allocator MAX_ORDER exceeds SECTION_SIZE
 #endif
 
-#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
-#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
+static inline unsigned long pfn_to_section_nr(unsigned long pfn)
+{
+	return pfn >> PFN_SECTION_SHIFT;
+}
+static inline unsigned long section_nr_to_pfn(unsigned long sec)
+{
+	return sec << PFN_SECTION_SHIFT;
+}
 
 #define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
 #define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
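
One practical effect of converting these macros to inline functions (my gloss, with a made-up PFN_SECTION_SHIFT; the real value is configuration-dependent): the function promotes its argument to unsigned long before shifting, while the old macro shifted in the caller's type and could silently overflow:

	#include <stdio.h>

	#define PFN_SECTION_SHIFT 19	/* hypothetical value for illustration */

	/* old style: arithmetic happens in the caller's type */
	#define section_nr_to_pfn_macro(sec) ((sec) << PFN_SECTION_SHIFT)

	/* new style: argument is promoted to unsigned long first */
	static inline unsigned long section_nr_to_pfn(unsigned long sec)
	{
		return sec << PFN_SECTION_SHIFT;
	}

	int main(void)
	{
		unsigned int sec = 1u << 14;	/* 16384 */

		/* 16384 << 19 needs 34 bits, so the 32-bit shift wraps to 0 */
		printf("macro:  %lu\n", (unsigned long)section_nr_to_pfn_macro(sec));
		printf("inline: %lu\n", section_nr_to_pfn(sec));	/* 8589934592 on LP64 */
		return 0;
	}
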
@@ -293,6 +293,7 @@ struct pcmcia_device_id {
 #define INPUT_DEVICE_ID_SND_MAX		0x07
 #define INPUT_DEVICE_ID_FF_MAX		0x7f
 #define INPUT_DEVICE_ID_SW_MAX		0x0f
+#define INPUT_DEVICE_ID_PROP_MAX	0x1f
 
 #define INPUT_DEVICE_ID_MATCH_BUS	1
 #define INPUT_DEVICE_ID_MATCH_VENDOR	2
@@ -308,6 +309,7 @@ struct pcmcia_device_id {
 #define INPUT_DEVICE_ID_MATCH_SNDBIT	0x0400
 #define INPUT_DEVICE_ID_MATCH_FFBIT	0x0800
 #define INPUT_DEVICE_ID_MATCH_SWBIT	0x1000
+#define INPUT_DEVICE_ID_MATCH_PROPBIT	0x2000
 
 struct input_device_id {
 
@@ -327,6 +329,7 @@ struct input_device_id {
 	kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1];
 	kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1];
 	kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1];
+	kernel_ulong_t propbit[INPUT_DEVICE_ID_PROP_MAX / BITS_PER_LONG + 1];
 
 	kernel_ulong_t driver_info;
 };
@@ -3694,6 +3694,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 				    unsigned char name_assign_type,
 				    void (*setup)(struct net_device *),
 				    unsigned int txqs, unsigned int rxqs);
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+		       const char *name);
+
 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
 
@@ -108,9 +108,10 @@ struct ebt_table {
 
 #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
 		     ~(__alignof__(struct _xt_align)-1))
-extern struct ebt_table *ebt_register_table(struct net *net,
-					    const struct ebt_table *table,
-					    const struct nf_hook_ops *);
+extern int ebt_register_table(struct net *net,
+			      const struct ebt_table *table,
+			      const struct nf_hook_ops *ops,
+			      struct ebt_table **res);
 extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
 				 const struct nf_hook_ops *);
 extern unsigned int ebt_do_table(struct sk_buff *skb,
@@ -12,11 +12,31 @@
 
 #ifdef CONFIG_LOCKUP_DETECTOR
 void lockup_detector_init(void);
+void lockup_detector_soft_poweroff(void);
+void lockup_detector_cleanup(void);
+bool is_hardlockup(void);
+
+extern int watchdog_user_enabled;
+extern int nmi_watchdog_user_enabled;
+extern int soft_watchdog_user_enabled;
+extern int watchdog_thresh;
+extern unsigned long watchdog_enabled;
+
+extern struct cpumask watchdog_cpumask;
+extern unsigned long *watchdog_cpumask_bits;
+#ifdef CONFIG_SMP
+extern int sysctl_softlockup_all_cpu_backtrace;
+extern int sysctl_hardlockup_all_cpu_backtrace;
 #else
-static inline void lockup_detector_init(void)
-{
-}
-#endif
+#define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
+#endif /* !CONFIG_SMP */
+
+#else /* CONFIG_LOCKUP_DETECTOR */
+static inline void lockup_detector_init(void) { }
+static inline void lockup_detector_soft_poweroff(void) { }
+static inline void lockup_detector_cleanup(void) { }
+#endif /* !CONFIG_LOCKUP_DETECTOR */
 
 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
 extern void touch_softlockup_watchdog_sched(void);
@@ -24,29 +44,17 @@ extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern unsigned int softlockup_panic;
-extern int soft_watchdog_enabled;
-extern atomic_t watchdog_park_in_progress;
 #else
-static inline void touch_softlockup_watchdog_sched(void)
-{
-}
-static inline void touch_softlockup_watchdog(void)
-{
-}
-static inline void touch_softlockup_watchdog_sync(void)
-{
-}
-static inline void touch_all_softlockup_watchdogs(void)
-{
-}
+static inline void touch_softlockup_watchdog_sched(void) { }
+static inline void touch_softlockup_watchdog(void) { }
+static inline void touch_softlockup_watchdog_sync(void) { }
+static inline void touch_all_softlockup_watchdogs(void) { }
 #endif
 
 #ifdef CONFIG_DETECT_HUNG_TASK
 void reset_hung_task_detector(void);
 #else
-static inline void reset_hung_task_detector(void)
-{
-}
+static inline void reset_hung_task_detector(void) { }
 #endif
 
 /*
@@ -54,12 +62,12 @@ static inline void reset_hung_task_detector(void)
  * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
  * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
- * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
- * are variables that are only used as an 'interface' between the parameters
- * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
- * 'watchdog_thresh' variable is handled differently because its value is not
- * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
- * is equal zero.
+ * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
+ * 'soft_watchdog_user_enabled' are variables that are only used as an
+ * 'interface' between the parameters in /proc/sys/kernel and the internal
+ * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
+ * handled differently because its value is not boolean, and the lockup
+ * detectors are 'suspended' while 'watchdog_thresh' is equal zero.
 */
 #define NMI_WATCHDOG_ENABLED_BIT   0
 #define SOFT_WATCHDOG_ENABLED_BIT  1
@@ -73,17 +81,41 @@ extern unsigned int hardlockup_panic;
 static inline void hardlockup_detector_disable(void) {}
 #endif
 
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+# define NMI_WATCHDOG_SYSCTL_PERM	0644
+#else
+# define NMI_WATCHDOG_SYSCTL_PERM	0444
+#endif
+
 #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
 extern void arch_touch_nmi_watchdog(void);
+extern void hardlockup_detector_perf_stop(void);
+extern void hardlockup_detector_perf_restart(void);
+extern void hardlockup_detector_perf_disable(void);
+extern void hardlockup_detector_perf_enable(void);
+extern void hardlockup_detector_perf_cleanup(void);
+extern int hardlockup_detector_perf_init(void);
 #else
-#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline void hardlockup_detector_perf_stop(void) { }
+static inline void hardlockup_detector_perf_restart(void) { }
+static inline void hardlockup_detector_perf_disable(void) { }
+static inline void hardlockup_detector_perf_enable(void) { }
+static inline void hardlockup_detector_perf_cleanup(void) { }
+# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
 static inline void arch_touch_nmi_watchdog(void) {}
+# else
+static inline int hardlockup_detector_perf_init(void) { return 0; }
+# endif
 #endif
+#endif
 
 void watchdog_nmi_stop(void);
 void watchdog_nmi_start(void);
 int watchdog_nmi_probe(void);
 
 /**
  * touch_nmi_watchdog - restart NMI watchdog timeout.
- * 
+ *
  * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
  * may be used to reset the timeout - for code which intentionally
  * disables interrupts for a long time. This call is stateless.
@@ -153,22 +185,6 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
 #endif
 
-#ifdef CONFIG_LOCKUP_DETECTOR
-extern int nmi_watchdog_enabled;
-extern int watchdog_user_enabled;
-extern int watchdog_thresh;
-extern unsigned long watchdog_enabled;
-extern struct cpumask watchdog_cpumask;
-extern unsigned long *watchdog_cpumask_bits;
-extern int __read_mostly watchdog_suspended;
-#ifdef CONFIG_SMP
-extern int sysctl_softlockup_all_cpu_backtrace;
-extern int sysctl_hardlockup_all_cpu_backtrace;
-#else
-#define sysctl_softlockup_all_cpu_backtrace 0
-#define sysctl_hardlockup_all_cpu_backtrace 0
-#endif
-
 #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
     defined(CONFIG_HARDLOCKUP_DETECTOR)
 void watchdog_update_hrtimer_threshold(u64 period);
@@ -176,7 +192,6 @@ void watchdog_update_hrtimer_threshold(u64 period);
 static inline void watchdog_update_hrtimer_threshold(u64 period) { }
 #endif
 
-extern bool is_hardlockup(void);
 struct ctl_table;
 extern int proc_watchdog(struct ctl_table *, int ,
 			 void __user *, size_t *, loff_t *);
@@ -188,18 +203,6 @@ extern int proc_watchdog_thresh(struct ctl_table *, int ,
 			 void __user *, size_t *, loff_t *);
 extern int proc_watchdog_cpumask(struct ctl_table *, int,
 				 void __user *, size_t *, loff_t *);
-extern int lockup_detector_suspend(void);
-extern void lockup_detector_resume(void);
-#else
-static inline int lockup_detector_suspend(void)
-{
-	return 0;
-}
-
-static inline void lockup_detector_resume(void)
-{
-}
-#endif
 
 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
 #include <asm/nmi.h>
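
Hypothetical caller of the touch_nmi_watchdog() API documented above (the device loop and helper names are invented): code that legitimately spins or keeps interrupts off for a long stretch pets the watchdog each iteration so the hard-lockup detector does not fire:

	struct device_state {		/* invented for this sketch */
		int nr_slots;
	};

	static void flush_all_slots(struct device_state *st)
	{
		int i;

		for (i = 0; i < st->nr_slots; i++) {
			slow_mmio_flush(st, i);	/* invented: may take milliseconds */
			touch_nmi_watchdog();	/* stateless reset of the NMI timeout */
		}
	}
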
@@ -734,6 +734,16 @@ static inline struct device_node *of_get_cpu_node(int cpu,
 	return NULL;
 }
 
+static inline int of_n_addr_cells(struct device_node *np)
+{
+	return 0;
+
+}
+static inline int of_n_size_cells(struct device_node *np)
+{
+	return 0;
+}
+
 static inline int of_property_read_u64(const struct device_node *np,
 					const char *propname, u64 *out_value)
 {
@@ -27,16 +27,17 @@ enum pm_qos_flags_status {
 	PM_QOS_FLAGS_ALL,
 };
 
-#define PM_QOS_DEFAULT_VALUE -1
+#define PM_QOS_DEFAULT_VALUE	(-1)
+#define PM_QOS_LATENCY_ANY	S32_MAX
 
 #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
 #define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE	0
 #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE	0
+#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT	PM_QOS_LATENCY_ANY
 #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE	0
 #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT	(-1)
-#define PM_QOS_LATENCY_ANY			((s32)(~(__u32)0 >> 1))
 
 #define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)
 #define PM_QOS_FLAG_REMOTE_WAKEUP	(1 << 1)
@@ -276,7 +276,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
 #define list_entry_rcu(ptr, type, member) \
 	container_of(lockless_dereference(ptr), type, member)
 
-/**
+/*
  * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
  * Implementing those functions following their counterparts list_empty() and
@@ -523,7 +523,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  * Return the value of the specified RCU-protected pointer, but omit
  * both the smp_read_barrier_depends() and the READ_ONCE(). This
  * is useful in cases where update-side locks prevent the value of the
- * pointer from changing. Please note that this primitive does -not-
+ * pointer from changing. Please note that this primitive does *not*
  * prevent the compiler from repeating this reference or combining it
  * with other references, so it should not be used without protection
  * of appropriate locks.
@@ -568,7 +568,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  * is handed off from RCU to some other synchronization mechanism, for
  * example, reference counting or locking. In C11, it would map to
  * kill_dependency(). It could be used as follows:
- *
+ * ``
  * rcu_read_lock();
  * p = rcu_dereference(gp);
  * long_lived = is_long_lived(p);
@@ -579,6 +579,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  *	p = rcu_pointer_handoff(p);
  * }
  * rcu_read_unlock();
+ *``
  */
 #define rcu_pointer_handoff(p) (p)
 
@@ -778,18 +779,21 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 
 /**
  * RCU_INIT_POINTER() - initialize an RCU protected pointer
+ * @p: The pointer to be initialized.
+ * @v: The value to initialized the pointer to.
 *
  * Initialize an RCU-protected pointer in special cases where readers
  * do not need ordering constraints on the CPU or the compiler. These
  * special cases are:
 *
- * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
+ * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
 * 2.	The caller has taken whatever steps are required to prevent
- *	RCU readers from concurrently accessing this pointer -or-
+ *	RCU readers from concurrently accessing this pointer *or*
 * 3.	The referenced data structure has already been exposed to
- *	readers either at compile time or via rcu_assign_pointer() -and-
- *	a.	You have not made -any- reader-visible changes to
- *		this structure since then -or-
+ *	readers either at compile time or via rcu_assign_pointer() *and*
+ *
+ *	a.	You have not made *any* reader-visible changes to
+ *		this structure since then *or*
 *	b.	It is OK for readers accessing this structure from its
 *		new location to see the old state of the structure. (For
 *		example, the changes were to statistical counters or to
@@ -805,7 +809,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
- * external-to-structure pointer -after- you have completely initialized
+ * external-to-structure pointer *after* you have completely initialized
 * the reader-accessible portions of the linked structure.
 *
 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
@@ -819,6 +823,8 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 
 /**
  * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
+ * @p: The pointer to be initialized.
+ * @v: The value to initialized the pointer to.
 *
  * GCC-style initialization for an RCU-protected pointer in a structure field.
 */
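
A kernel-context sketch of special case 1 versus the ordered default (struct foo, gp, retract() and publish() are all invented for illustration; this will not compile outside the kernel):

	struct foo {
		int a;
	};
	static struct foo __rcu *gp;

	static void retract(void)
	{
		RCU_INIT_POINTER(gp, NULL);	/* case 1: NULLing out, no ordering needed */
	}

	static void publish(struct foo *p)
	{
		p->a = 42;			/* initialize first... */
		rcu_assign_pointer(gp, p);	/* ...then publish with release ordering */
	}
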
@@ -84,6 +84,12 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
+#ifdef CONFIG_MMU
+/* same as above but performs the slow path from the async context. Can
+ * be called from the atomic context as well
+ */
+void mmput_async(struct mm_struct *);
+#endif
 
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
@@ -205,4 +211,20 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
 	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
 }
 
+#ifdef CONFIG_MEMBARRIER
+enum {
+	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY	= (1U << 0),
+	MEMBARRIER_STATE_SWITCH_MM			= (1U << 1),
+};
+
+static inline void membarrier_execve(struct task_struct *t)
+{
+	atomic_set(&t->mm->membarrier_state, 0);
+}
+#else
+static inline void membarrier_execve(struct task_struct *t)
+{
+}
+#endif
+
 #endif /* _LINUX_SCHED_MM_H */
@@ -71,14 +71,6 @@ struct sched_domain_shared {
 	atomic_t	ref;
 	atomic_t	nr_busy_cpus;
 	int		has_idle_cores;
-
-	/*
-	 * Some variables from the most recent sd_lb_stats for this domain,
-	 * used by wake_affine().
-	 */
-	unsigned long	nr_running;
-	unsigned long	load;
-	unsigned long	capacity;
 };
 
 struct sched_domain {
@@ -231,7 +231,7 @@ struct sctp_datahdr {
 	__be32 tsn;
 	__be16 stream;
 	__be16 ssn;
-	__be32 ppid;
+	__u32 ppid;
 	__u8  payload[0];
 };
 
@@ -716,28 +716,28 @@ struct sctp_reconf_chunk {
 
 struct sctp_strreset_outreq {
 	struct sctp_paramhdr param_hdr;
-	__u32 request_seq;
-	__u32 response_seq;
-	__u32 send_reset_at_tsn;
-	__u16 list_of_streams[0];
+	__be32 request_seq;
+	__be32 response_seq;
+	__be32 send_reset_at_tsn;
+	__be16 list_of_streams[0];
 };
 
 struct sctp_strreset_inreq {
 	struct sctp_paramhdr param_hdr;
-	__u32 request_seq;
-	__u16 list_of_streams[0];
+	__be32 request_seq;
+	__be16 list_of_streams[0];
 };
 
 struct sctp_strreset_tsnreq {
 	struct sctp_paramhdr param_hdr;
-	__u32 request_seq;
+	__be32 request_seq;
 };
 
 struct sctp_strreset_addstrm {
 	struct sctp_paramhdr param_hdr;
-	__u32 request_seq;
-	__u16 number_of_streams;
-	__u16 reserved;
+	__be32 request_seq;
+	__be16 number_of_streams;
+	__be16 reserved;
 };
 
 enum {
@@ -752,16 +752,16 @@ enum {
 
 struct sctp_strreset_resp {
 	struct sctp_paramhdr param_hdr;
-	__u32 response_seq;
-	__u32 result;
+	__be32 response_seq;
+	__be32 result;
 };
 
 struct sctp_strreset_resptsn {
 	struct sctp_paramhdr param_hdr;
-	__u32 response_seq;
-	__u32 result;
-	__u32 senders_next_tsn;
-	__u32 receivers_next_tsn;
+	__be32 response_seq;
+	__be32 result;
+	__be32 senders_next_tsn;
+	__be32 receivers_next_tsn;
 };
 
 #endif /* __LINUX_SCTP_H__ */
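
The __be annotations are sparse endianness types: these fields cross the wire big-endian, so a sender must fill them with cpu_to_be*()/hton*() and a receiver read them back with be*_to_cpu(). A hedged kernel-style sketch (build_outreq() is an invented helper; the conversions mirror the corrected field types):

	static void build_outreq(struct sctp_strreset_outreq *req,
				 u32 reqseq, u32 respseq, u32 tsn)
	{
		req->request_seq       = htonl(reqseq);
		req->response_seq      = htonl(respseq);
		req->send_reset_at_tsn = htonl(tsn);
	}
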
@@ -55,7 +55,7 @@ smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
 }
 
 void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
-					 const struct cpumask *);
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+					  const struct cpumask *);
 
 #endif
@@ -78,6 +78,7 @@ void synchronize_srcu(struct srcu_struct *sp);
 
 /**
  * srcu_read_lock_held - might we be in SRCU read-side critical section?
+ * @sp: The srcu_struct structure to check
 *
  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
  * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
@@ -9,13 +9,16 @@
 /*
  * Simple wait queues
 *
- * While these are very similar to the other/complex wait queues (wait.h) the
- * most important difference is that the simple waitqueue allows for
- * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
- * times.
+ * While these are very similar to regular wait queues (wait.h) the most
+ * important difference is that the simple waitqueue allows for deterministic
+ * behaviour -- IOW it has strictly bounded IRQ and lock hold times.
 *
- * In order to make this so, we had to drop a fair number of features of the
- * other waitqueue code; notably:
+ * Mainly, this is accomplished by two things. Firstly not allowing swake_up_all
+ * from IRQ disabled, and dropping the lock upon every wakeup, giving a higher
+ * priority task a chance to run.
+ *
+ * Secondly, we had to drop a fair number of features of the other waitqueue
+ * code; notably:
 *
  * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
  *   all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
@@ -24,12 +27,14 @@
  * - the exclusive mode; because this requires preserving the list order
  *   and this is hard.
 *
- * - custom wake functions; because you cannot give any guarantees about
- *   random code.
+ * - custom wake callback functions; because you cannot give any guarantees
+ *   about random code. This also allows swait to be used in RT, such that
+ *   raw spinlock can be used for the swait queue head.
 *
- * As a side effect of this; the data structures are slimmer.
+ * As a side effect of these; the data structures are slimmer albeit more ad-hoc.
 *
- * One would recommend using this wait queue where possible.
+ * For all the above, note that simple wait queues should _only_ be used under
+ * very specific realtime constraints -- it is best to stick with the regular
+ * wait queues in most cases.
 */
 
 struct task_struct;
@@ -42,7 +42,7 @@ enum {
 #define THREAD_ALIGN	THREAD_SIZE
 #endif
 
-#ifdef CONFIG_DEBUG_STACK_USAGE
+#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
 # define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
 				 __GFP_ZERO)
 #else