Merge tag 'v4.20-rc6' into rdma.git for-next

For dependencies in following patches.
Jason Gunthorpe
2018-12-11 14:24:57 -07:00
1305 changed files with 15717 additions and 7810 deletions

View File

@@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx);
struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);
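As a hedged illustration of how these echo-skb helpers pair up in a driver (the slot bookkeeping and function names below are assumptions, not taken from this diff):

/* Illustrative sketch: the TX path stores a copy of the skb, and the
 * TX-complete handler loops it back to the stack for local echo. */
static netdev_tx_t my_can_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int idx = 0;			/* assumed free echo slot */

	can_put_echo_skb(skb, dev, idx);	/* keep a copy for loopback */
	/* ... hand the frame to the hardware ... */
	return NETDEV_TX_OK;
}

static void my_can_tx_done(struct net_device *dev, unsigned int idx)
{
	/* returns the echoed frame's byte count for stats accounting */
	dev->stats.tx_bytes += can_get_echo_skb(dev, idx);
	dev->stats.tx_packets++;
}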

View File

@@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
struct sk_buff *skb, u32 timestamp);
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int idx, u32 timestamp);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
void can_rx_offload_reset(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
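The new queue_sorted/queue_tail helpers replace can_rx_offload_irq_queue_err_skb(); a hedged sketch of delivering an error frame in timestamp order (the driver specifics are assumed):

/* Illustrative sketch: queue an error skb so it is delivered in the
 * same hardware-timestamp order as regular RX frames. */
static void my_handle_bus_error(struct can_rx_offload *offload,
				struct sk_buff *skb, u32 hw_timestamp)
{
	if (can_rx_offload_queue_sorted(offload, skb, hw_timestamp))
		offload->dev->stats.rx_fifo_errors++;	/* queue limit hit */
}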

View File

@@ -213,12 +213,6 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
CEPH_FEATURE_CEPHX_V2)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
CEPH_FEATURE_SUBSCRIBE2 | \
CEPH_FEATURE_RECONNECT_SEQ | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC)
#define CEPH_FEATURES_REQUIRED_DEFAULT 0
#endif

View File

@@ -143,18 +143,6 @@
#define KASAN_ABI_VERSION 3
#endif
/*
* Because __no_sanitize_address conflicts with inlining:
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* we do one or the other.
*/
#ifdef CONFIG_KASAN
#define __no_sanitize_address_or_inline \
__no_sanitize_address __maybe_unused notrace
#else
#define __no_sanitize_address_or_inline inline
#endif
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif

View File

@@ -189,7 +189,7 @@ void __read_once_size(const volatile void *p, void *res, int size)
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

View File

@@ -4,22 +4,26 @@
/*
* The attributes in this file are unconditionally defined and they directly
* map to compiler attribute(s) -- except those that are optional.
* map to compiler attribute(s), unless one of the compilers does not support
* the attribute. In that case, __has_attribute is used to check for support
* and the reason is stated in its comment ("Optional: ...").
*
* Any other "attributes" (i.e. those that depend on a configuration option,
* on a compiler, on an architecture, on plugins, on other attributes...)
* should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
* The intention is to keep this file as simple as possible, as well as
* compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
*
* This file is meant to be sorted (by actual attribute name,
* not by #define identifier). Use the __attribute__((__name__)) syntax
* (i.e. with underscores) to avoid future collisions with other macros.
* If an attribute is optional, state the reason in the comment.
* Provide links to the documentation of each supported compiler, if it exists.
*/
/*
* To check for optional attributes, we use __has_attribute, which is supported
* on gcc >= 5, clang >= 2.9 and icc >= 17. In the meantime, to support
* 4.6 <= gcc < 5, we implement __has_attribute by hand.
* __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
* In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
* by hand.
*
* sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
* depending on the compiler used to build it; however, these attributes have

View File

@@ -130,6 +130,10 @@ struct ftrace_likely_data {
# define randomized_struct_fields_end
#endif
#ifndef asm_volatile_goto
#define asm_volatile_goto(x...) asm goto(x)
#endif
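A hedged example of what the fallback enables (x86 assembly assumed; the function itself is illustrative):

/* Illustrative sketch: asm goto lets inline assembly jump to C labels.
 * A jump-label framework would patch the jmp to a nop at runtime. */
static __always_inline bool my_fast_path_enabled(void)
{
	asm_volatile_goto("jmp %l[slow]" : : : : slow);
	return true;	/* reached only once the jmp is patched out */
slow:
	return false;
}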
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))

View File

@@ -7,6 +7,8 @@
#include <linux/radix-tree.h>
#include <asm/pgtable.h>
typedef unsigned long dax_entry_t;
struct iomap_ops;
struct dax_device;
struct dax_operations {
@@ -88,8 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc);
struct page *dax_layout_busy_page(struct address_space *mapping);
bool dax_lock_mapping_entry(struct page *page);
void dax_unlock_mapping_entry(struct page *page);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
int blocksize)
@@ -122,14 +124,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
return -EOPNOTSUPP;
}
static inline bool dax_lock_mapping_entry(struct page *page)
static inline dax_entry_t dax_lock_page(struct page *page)
{
if (IS_DAX(page->mapping->host))
return true;
return false;
return ~0UL;
return 0;
}
static inline void dax_unlock_mapping_entry(struct page *page)
static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif
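A hedged sketch of the reworked locking contract (the caller and error code are assumptions):

/* Illustrative sketch: the cookie returned by dax_lock_page() must be
 * passed back to dax_unlock_page(); 0 means the lock was not taken. */
static int my_poison_dax_page(struct page *page)
{
	dax_entry_t cookie = dax_lock_page(page);

	if (!cookie)
		return -EBUSY;
	/* ... page->mapping cannot change while the entry is locked ... */
	dax_unlock_page(page, cookie);
	return 0;
}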

View File

@@ -5,7 +5,7 @@
#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>
#define DIRECT_MAPPING_ERROR 0
#define DIRECT_MAPPING_ERROR (~(dma_addr_t)0)
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>

View File

@@ -1167,6 +1167,8 @@ static inline bool efi_enabled(int feature)
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
extern bool efi_is_table_address(unsigned long phys_addr);
extern int efi_apply_persistent_mem_reservations(void);
#else
static inline bool efi_enabled(int feature)
{
@@ -1185,6 +1187,11 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
{
return false;
}
static inline int efi_apply_persistent_mem_reservations(void)
{
return 0;
}
#endif
extern int efi_status_to_err(efi_status_t status);

View File

@@ -449,6 +449,13 @@ struct sock_reuseport;
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \
offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
#if BITS_PER_LONG == 64
# define bpf_ctx_range_ptr(TYPE, MEMBER) \
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#else
# define bpf_ctx_range_ptr(TYPE, MEMBER) \
offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
#endif /* BITS_PER_LONG == 64 */
#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
({ \
@@ -866,6 +873,10 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
void bpf_jit_free(struct bpf_prog *fp);
int bpf_jit_get_func_addr(const struct bpf_prog *prog,
const struct bpf_insn *insn, bool extra_pass,
u64 *func_addr, bool *func_addr_fixed);
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

View File

@@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
int n_pages)
{
atomic_sub(n_pages, &op->n_pages);
if (atomic_read(&op->n_pages) <= 0)
if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
fscache_op_complete(&op->op, false);
}

View File

@@ -777,8 +777,8 @@ struct ftrace_ret_stack {
extern void return_to_handler(void);
extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
unsigned long frame_pointer, unsigned long *retp);
function_graph_enter(unsigned long ret, unsigned long func,
unsigned long frame_pointer, unsigned long *retp);
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp);

View File

@@ -510,18 +510,22 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
int node);
int node, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
alloc_pages_vma(gfp_mask, 0, vma, addr, node)
alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

View File

@@ -177,6 +177,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
* @attr_usage_id: Attribute usage id as per spec
* @report_id: Report id to look for
* @flag: Synchronous or asynchronous read
* @is_signed: If true then fields < 32 bits will be sign-extended
*
* Issues a synchronous or asynchronous read request for an input attribute.
Returns data up to 32 bits.
@@ -190,7 +191,8 @@ enum sensor_hub_read_flags {
int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
u32 attr_usage_id, u32 report_id,
enum sensor_hub_read_flags flag
enum sensor_hub_read_flags flag,
bool is_signed
);
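A hedged example call showing the new flag (the accelerometer usage IDs are assumed for illustration):

/* Illustrative sketch: read one signed X-axis sample synchronously,
 * letting the core sign-extend fields narrower than 32 bits. */
static int my_read_accel_x(struct hid_sensor_hub_device *hsdev, u32 report_id)
{
	return sensor_hub_input_attr_get_raw_value(hsdev,
			HID_USAGE_SENSOR_ACCEL_3D,
			HID_USAGE_SENSOR_ACCEL_X_AXIS,
			report_id, SENSOR_HUB_SYNC,
			true /* is_signed */);
}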
/**

View File

@@ -722,8 +722,8 @@ struct hid_usage_id {
* input will not be passed to raw_event unless hid_device_io_start is
* called.
*
* raw_event and event should return 0 on no action performed, 1 when no
* further processing should be done and negative on error
raw_event and event should return negative on error; any other value will
pass the event on to .event(). Typically return 0 for success.
*
* input_mapping shall return a negative value to completely ignore this usage
* (e.g. doubled or invalid usage), zero to continue with parsing of this
@@ -1139,34 +1139,6 @@ static inline u32 hid_report_len(struct hid_report *report)
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt);
/**
* struct hid_scroll_counter - Utility class for processing high-resolution
* scroll events.
* @dev: the input device for which events should be reported.
* @microns_per_hi_res_unit: the amount moved by the user's finger for each
* high-resolution unit reported by the mouse, in
* microns.
* @resolution_multiplier: the wheel's resolution in high-resolution mode as a
* multiple of its lower resolution. For example, if
* moving the wheel by one "notch" would result in a
* value of 1 in low-resolution mode but 8 in
* high-resolution, the multiplier is 8.
* @remainder: counts the number of high-resolution units moved since the last
* low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should
* only be used by class methods.
*/
struct hid_scroll_counter {
struct input_dev *dev;
int microns_per_hi_res_unit;
int resolution_multiplier;
int remainder;
};
void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
int hi_res_value);
/* HID quirks API */
unsigned long hid_lookup_quirk(const struct hid_device *hdev);
int hid_quirks_init(char **quirks_param, __u16 bus, int count);

View File

@@ -905,6 +905,13 @@ struct vmbus_channel {
bool probe_done;
/*
* We must offload the handling of the primary/sub channels
* from the single-threaded vmbus_connection.work_queue to
two different workqueues, otherwise we can block
* vmbus_connection.work_queue and hang: see vmbus_process_offer().
*/
struct work_struct add_channel_work;
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)

View File

@@ -21,6 +21,7 @@
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);

View File

@@ -139,8 +139,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
unsigned long addr);
struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);

View File

@@ -1744,11 +1744,15 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
if (mm_pud_folded(mm))
return;
atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
if (mm_pud_folded(mm))
return;
atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif
@@ -1768,11 +1772,15 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
if (mm_pmd_folded(mm))
return;
atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
if (mm_pmd_folded(mm))
return;
atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif

View File

@@ -324,9 +324,8 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
*/
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
return (u64)nand->memorg.luns_per_target *
nand->memorg.eraseblocks_per_lun *
nand->memorg.pages_per_eraseblock;
return nand->memorg.ntargets * nand->memorg.luns_per_target *
nand->memorg.eraseblocks_per_lun;
}
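As a hedged worked example of the corrected formula: a hypothetical device with 2 targets, 2 LUNs per target and 1024 eraseblocks per LUN now reports 2 * 2 * 1024 = 4096 eraseblocks. The old expression multiplied in pages_per_eraseblock (so it really counted pages per target) and never accounted for ntargets.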
/**
@@ -569,7 +568,7 @@ static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
}
/**
* nanddev_pos_next_eraseblock() - Move a position to the next page
* nanddev_pos_next_page() - Move a position to the next page
* @nand: NAND device
* @pos: the position to update
*

View File

@@ -406,6 +406,8 @@ static inline void net_dim(struct net_dim *dim,
}
/* fall through */
case NET_DIM_START_MEASURE:
net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr,
&dim->start_sample);
dim->state = NET_DIM_MEASURE_IN_PROGRESS;
break;
case NET_DIM_APPLY_NEW_PROFILE:

View File

@@ -3190,6 +3190,26 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
#endif
}
/* Variant of netdev_tx_sent_queue() for drivers that are aware
* that they should not test BQL status themselves.
* We do want to change __QUEUE_STATE_STACK_XOFF only for the last
* skb of a batch.
* Returns true if the doorbell must be used to kick the NIC.
*/
static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
unsigned int bytes,
bool xmit_more)
{
if (xmit_more) {
#ifdef CONFIG_BQL
dql_queued(&dev_queue->dql, bytes);
#endif
return netif_tx_queue_stopped(dev_queue);
}
netdev_tx_sent_queue(dev_queue, bytes);
return true;
}
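A hedged sketch of a driver TX path using the new helper (the ring and doorbell helpers are assumed):

/* Illustrative sketch: only the last skb of an xmit_more batch needs
 * to ring the doorbell; earlier skbs just accumulate BQL bytes. */
static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* ... post the skb to the hardware ring ... */
	if (__netdev_tx_sent_queue(txq, skb->len, skb->xmit_more))
		my_ring_doorbell(dev);	/* assumed device-specific helper */
	return NETDEV_TX_OK;
}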
/**
* netdev_sent_queue - report the number of bytes queued to hardware
* @dev: network device

View File

@@ -314,7 +314,7 @@ enum {
extern ip_set_id_t ip_set_get_byname(struct net *net,
const char *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);

View File

@@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
rcu_assign_pointer(comment->c, c);
}
/* Used only when dumping a set, protected by rcu_read_lock_bh() */
/* Used only when dumping a set, protected by rcu_read_lock() */
static inline int
ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
if (!c)
return 0;

View File

@@ -21,6 +21,19 @@ struct nf_ct_gre_keymap {
struct nf_conntrack_tuple tuple;
};
enum grep_conntrack {
GRE_CT_UNREPLIED,
GRE_CT_REPLIED,
GRE_CT_MAX
};
struct netns_proto_gre {
struct nf_proto_net nf;
rwlock_t keymap_lock;
struct list_head keymap_list;
unsigned int gre_timeouts[GRE_CT_MAX];
};
/* add new tuple->key_reply pair to keymap */
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t);

View File

@@ -119,6 +119,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; }
void watchdog_nmi_stop(void);
void watchdog_nmi_start(void);
int watchdog_nmi_probe(void);
int watchdog_nmi_enable(unsigned int cpu);
void watchdog_nmi_disable(unsigned int cpu);
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.

View File

@@ -17,6 +17,8 @@
#define __DAVINCI_GPIO_PLATFORM_H
struct davinci_gpio_platform_data {
bool no_auto_base;
u32 base;
u32 ngpio;
u32 gpio_unbanked;
};

View File

@@ -1,6 +1,7 @@
#ifndef _LINUX_PSI_H
#define _LINUX_PSI_H
#include <linux/jump_label.h>
#include <linux/psi_types.h>
#include <linux/sched.h>
@@ -9,7 +10,7 @@ struct css_set;
#ifdef CONFIG_PSI
extern bool psi_disabled;
extern struct static_key_false psi_disabled;
void psi_init(void);

View File

@@ -90,7 +90,10 @@ struct pstore_record {
*
* @buf_lock: spinlock to serialize access to @buf
* @buf: preallocated crash dump buffer
* @bufsize: size of @buf available for crash dump writes
* @bufsize: size of @buf available for crash dump bytes (must match
* smallest number of bytes available for writing to a
* backend entry, since compressed bytes don't take kindly
* to being truncated)
*
* @read_mutex: serializes @open, @read, @close, and @erase callbacks
* @flags: bitfield of frontends the backend can accept writes for

View File

@@ -64,15 +64,12 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_NOAUDIT 0x04
#define PTRACE_MODE_FSCREDS 0x08
#define PTRACE_MODE_REALCREDS 0x10
#define PTRACE_MODE_SCHED 0x20
#define PTRACE_MODE_IBPB 0x40
/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
/**
* ptrace_may_access - check whether the caller is permitted to access
@@ -90,20 +87,6 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
*/
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
/**
* ptrace_may_access - check whether the caller is permitted to access
* a target task.
* @task: target task
* @mode: selects type of access and caller credentials
*
* Returns true on success, false on denial.
*
* Similar to ptrace_may_access(). Only to be called from context switch
* code. Does not call into audit and the regular LSM hooks due to locking
* constraints.
*/
extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
static inline int ptrace_reparented(struct task_struct *child)
{
return !same_thread_group(child->real_parent, child->parent);

View File

@@ -1116,6 +1116,7 @@ struct task_struct {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
int curr_ret_stack;
int curr_ret_depth;
/* Stack of return addresses for return function tracing: */
struct ftrace_ret_stack *ret_stack;
@@ -1453,6 +1454,8 @@ static inline bool is_percpu_thread(void)
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
@@ -1484,6 +1487,13 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{

include/linux/sched/smt.h (new file, 20 lines)
View File

@@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SMT_H
#define _LINUX_SCHED_SMT_H
#include <linux/static_key.h>
#ifdef CONFIG_SCHED_SMT
extern struct static_key_false sched_smt_present;
static __always_inline bool sched_smt_active(void)
{
return static_branch_likely(&sched_smt_present);
}
#else
static inline bool sched_smt_active(void) { return false; }
#endif
void arch_smt_update(void);
#endif
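A hedged sketch of the intended usage (the mitigation hook is an assumption):

/* Illustrative sketch: skip SMT-only work when no sibling threads are
 * online; the static branch makes the common case nearly free. */
static void my_update_mitigation(void)
{
	if (!sched_smt_active())
		return;
	/* ... enable protection against the sibling thread ... */
}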

View File

@@ -224,7 +224,7 @@ struct sfp_eeprom_ext {
*
* See the SFF-8472 specification and related documents for the definition
* of these structure members. This can be obtained from
* ftp://ftp.seagate.com/sff
* https://www.snia.org/technology-communities/sff/specifications
*/
struct sfp_eeprom_id {
struct sfp_eeprom_base base;

View File

@@ -1326,6 +1326,22 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
}
}
static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
}
static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
{
return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
}
static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
{
return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}
/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
@@ -1335,7 +1351,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
if (uarg->callback == sock_zerocopy_callback) {
uarg->zerocopy = uarg->zerocopy && zerocopy;
sock_zerocopy_put(uarg);
} else {
} else if (!skb_zcopy_is_nouarg(skb)) {
uarg->callback(uarg, zerocopy);
}

View File

@@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
buf->head[0].iov_base = start;
buf->head[0].iov_len = len;
buf->tail[0].iov_len = 0;
buf->bvec = NULL;
buf->pages = NULL;
buf->page_len = 0;
buf->flags = 0;

View File

@@ -196,6 +196,7 @@ struct tcp_sock {
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
u32 compressed_ack_rcv_nxt;
u32 tsoffset; /* timestamp offset */

View File

@@ -83,8 +83,8 @@ static inline int ptrace_report_syscall(struct pt_regs *regs)
* tracehook_report_syscall_entry - task is about to attempt a system call
* @regs: user register state of current task
*
* This will be called if %TIF_SYSCALL_TRACE has been set, when the
* current task has just entered the kernel for a system call.
* This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU have been set,
* when the current task has just entered the kernel for a system call.
* Full user register state is available here. Changing the values
* in @regs can affect the system call number and arguments to be tried.
* It is safe to block here, preventing the system call from beginning.

View File

@@ -166,7 +166,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
struct tracepoint_func *it_func_ptr; \
void *it_func; \
void *__data; \
int __maybe_unused idx = 0; \
int __maybe_unused __idx = 0; \
\
if (!(cond)) \
return; \
@@ -182,7 +182,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* doesn't work from the idle path. \
*/ \
if (rcuidle) { \
idx = srcu_read_lock_notrace(&tracepoint_srcu); \
__idx = srcu_read_lock_notrace(&tracepoint_srcu);\
rcu_irq_enter_irqson(); \
} \
\
@@ -198,7 +198,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
\
if (rcuidle) { \
rcu_irq_exit_irqson(); \
srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
} \
\
preempt_enable_notrace(); \

View File

@@ -556,6 +556,7 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
extern void tty_release_struct(struct tty_struct *tty, int idx);
extern int tty_release(struct inode *inode, struct file *filp);
extern void tty_init_termios(struct tty_struct *tty);
extern void tty_save_termios(struct tty_struct *tty);
extern int tty_standard_install(struct tty_driver *driver,
struct tty_struct *tty);

View File

@@ -407,11 +407,11 @@ struct usb_host_bos {
};
int __usb_get_extra_descriptor(char *buffer, unsigned size,
unsigned char type, void **ptr);
unsigned char type, void **ptr, size_t min);
#define usb_get_extra_descriptor(ifpoint, type, ptr) \
__usb_get_extra_descriptor((ifpoint)->extra, \
(ifpoint)->extralen, \
type, (void **)ptr)
type, (void **)ptr, sizeof(**(ptr)))
/* ----------------------------------------------------------------------- */
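A hedged example of the hardened macro (the descriptor type and caller are illustrative):

/* Illustrative sketch: with the added sizeof(**(ptr)) minimum, a
 * descriptor shorter than *comp is now rejected rather than read
 * out of bounds. */
static int my_max_burst(struct usb_host_interface *alt)
{
	struct usb_ss_ep_comp_descriptor *comp;

	if (usb_get_extra_descriptor(alt, USB_DT_SS_ENDPOINT_COMP, &comp))
		return -ENOENT;		/* absent or too short */
	return comp->bMaxBurst;
}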

View File

@@ -66,4 +66,7 @@
/* Device needs a pause after every control message. */
#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
/* Hub needs extra delay after resetting its port. */
#define USB_QUIRK_HUB_SLOW_RESET BIT(14)
#endif /* __LINUX_USB_QUIRKS_H */

View File

@@ -289,9 +289,7 @@ struct xarray {
void xa_init_flags(struct xarray *, gfp_t flags);
void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_cmpxchg(struct xarray *, unsigned long index,
void *old, void *entry, gfp_t);
int xa_reserve(struct xarray *, unsigned long index, gfp_t);
void *xa_erase(struct xarray *, unsigned long index);
void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -343,65 +341,6 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
return xa->xa_flags & XA_FLAGS_MARK(mark);
}
/**
* xa_erase() - Erase this entry from the XArray.
* @xa: XArray.
* @index: Index of entry.
*
* This function is the equivalent of calling xa_store() with %NULL as
* the third argument. The XArray does not need to allocate memory, so
* the user does not need to provide GFP flags.
*
* Context: Process context. Takes and releases the xa_lock.
* Return: The entry which used to be at this index.
*/
static inline void *xa_erase(struct xarray *xa, unsigned long index)
{
return xa_store(xa, index, NULL, 0);
}
/**
* xa_insert() - Store this entry in the XArray unless another entry is
* already present.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* If you would rather see the existing entry in the array, use xa_cmpxchg().
* This function is for users who don't care what the entry is, only that
* one is present.
*
* Context: Process context. Takes and releases the xa_lock.
* May sleep if the @gfp flags permit.
* Return: 0 if the store succeeded. -EEXIST if another entry was present.
* -ENOMEM if memory could not be allocated.
*/
static inline int xa_insert(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
{
void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
if (!curr)
return 0;
if (xa_is_err(curr))
return xa_err(curr);
return -EEXIST;
}
/**
* xa_release() - Release a reserved entry.
* @xa: XArray.
* @index: Index of entry.
*
* After calling xa_reserve(), you can call this function to release the
* reservation. If the entry at @index has been stored to, this function
* will do nothing.
*/
static inline void xa_release(struct xarray *xa, unsigned long index)
{
xa_cmpxchg(xa, index, NULL, NULL, 0);
}
/**
* xa_for_each() - Iterate over a portion of an XArray.
* @xa: XArray.
@@ -455,6 +394,7 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
void *entry, gfp_t);
int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -486,6 +426,58 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
return -EEXIST;
}
/**
* xa_store_bh() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* This function is like calling xa_store() except it disables softirqs
* while holding the array lock.
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs.
* Return: The entry which used to be at this index.
*/
static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
{
void *curr;
xa_lock_bh(xa);
curr = __xa_store(xa, index, entry, gfp);
xa_unlock_bh(xa);
return curr;
}
/**
* xa_store_irq() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* This function is like calling xa_store() except it disables interrupts
* while holding the array lock.
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts.
* Return: The entry which used to be at this index.
*/
static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
{
void *curr;
xa_lock_irq(xa);
curr = __xa_store(xa, index, entry, gfp);
xa_unlock_irq(xa);
return curr;
}
/**
* xa_erase_bh() - Erase this entry from the XArray.
* @xa: XArray.
@@ -495,7 +487,7 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
* the third argument. The XArray does not need to allocate memory, so
* the user does not need to provide GFP flags.
*
* Context: Process context. Takes and releases the xa_lock while
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs.
* Return: The entry which used to be at this index.
*/
@@ -534,6 +526,61 @@ static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
return entry;
}
/**
* xa_cmpxchg() - Conditionally replace an entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @old: Old value to test against.
* @entry: New value to place in array.
* @gfp: Memory allocation flags.
*
* If the entry at @index is the same as @old, replace it with @entry.
* If the return value is equal to @old, then the exchange was successful.
*
* Context: Any context. Takes and releases the xa_lock. May sleep
* if the @gfp flags permit.
* Return: The old value at this index or xa_err() if an error happened.
*/
static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
void *curr;
xa_lock(xa);
curr = __xa_cmpxchg(xa, index, old, entry, gfp);
xa_unlock(xa);
return curr;
}
/**
* xa_insert() - Store this entry in the XArray unless another entry is
* already present.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* If you would rather see the existing entry in the array, use xa_cmpxchg().
* This function is for users who don't care what the entry is, only that
* one is present.
*
* Context: Process context. Takes and releases the xa_lock.
* May sleep if the @gfp flags permit.
* Return: 0 if the store succeeded. -EEXIST if another entry was present.
* -ENOMEM if memory could not be allocated.
*/
static inline int xa_insert(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
{
void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
if (!curr)
return 0;
if (xa_is_err(curr))
return xa_err(curr);
return -EEXIST;
}
/**
* xa_alloc() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
@@ -575,7 +622,7 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
* Updates the @id pointer with the index, then stores the entry at that
* index. A concurrent lookup will not see an uninitialised @id.
*
* Context: Process context. Takes and releases the xa_lock while
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
* there is no more space in the XArray.
@@ -621,6 +668,98 @@ static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
return err;
}
/**
* xa_reserve() - Reserve this index in the XArray.
* @xa: XArray.
* @index: Index into array.
* @gfp: Memory allocation flags.
*
* Ensures there is somewhere to store an entry at @index in the array.
* If there is already something stored at @index, this function does
* nothing. If there was nothing there, the entry is marked as reserved.
* Loading from a reserved entry returns a %NULL pointer.
*
* If you do not use the entry that you have reserved, call xa_release()
* or xa_erase() to free any unnecessary memory.
*
* Context: Any context. Takes and releases the xa_lock.
* May sleep if the @gfp flags permit.
* Return: 0 if the reservation succeeded or -ENOMEM if it failed.
*/
static inline
int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
int ret;
xa_lock(xa);
ret = __xa_reserve(xa, index, gfp);
xa_unlock(xa);
return ret;
}
/**
* xa_reserve_bh() - Reserve this index in the XArray.
* @xa: XArray.
* @index: Index into array.
* @gfp: Memory allocation flags.
*
* A softirq-disabling version of xa_reserve().
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs.
* Return: 0 if the reservation succeeded or -ENOMEM if it failed.
*/
static inline
int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
{
int ret;
xa_lock_bh(xa);
ret = __xa_reserve(xa, index, gfp);
xa_unlock_bh(xa);
return ret;
}
/**
* xa_reserve_irq() - Reserve this index in the XArray.
* @xa: XArray.
* @index: Index into array.
* @gfp: Memory allocation flags.
*
* An interrupt-disabling version of xa_reserve().
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts.
* Return: 0 if the reservation succeeded or -ENOMEM if it failed.
*/
static inline
int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
{
int ret;
xa_lock_irq(xa);
ret = __xa_reserve(xa, index, gfp);
xa_unlock_irq(xa);
return ret;
}
/**
* xa_release() - Release a reserved entry.
* @xa: XArray.
* @index: Index of entry.
*
* After calling xa_reserve(), you can call this function to release the
* reservation. If the entry at @index has been stored to, this function
* will do nothing.
*/
static inline void xa_release(struct xarray *xa, unsigned long index)
{
xa_cmpxchg(xa, index, NULL, NULL, 0);
}
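Tying the reservation API together, a hedged sketch (the publish/back-out policy is an assumption):

/* Illustrative sketch: reserve the slot while sleeping allocations are
 * still allowed, then store with GFP 0 (no allocation needed) or hand
 * the slot back. */
static int my_publish(struct xarray *xa, unsigned long index, void *obj,
		      bool keep)
{
	int err = xa_reserve(xa, index, GFP_KERNEL);

	if (err)
		return err;			/* -ENOMEM */
	if (keep)
		xa_store(xa, index, obj, 0);	/* slot already exists */
	else
		xa_release(xa, index);
	return 0;
}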
/* Everything below here is the Advanced API. Proceed with caution. */
/*