Merge tag 'v5.7-rc7' into efi/core, to refresh the branch and pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -65,6 +65,7 @@ struct amba_device {
 	struct device		dev;
 	struct resource		res;
 	struct clk		*pclk;
+	struct device_dma_parameters dma_parms;
 	unsigned int		periphid;
 	unsigned int		cid;
 	struct amba_cs_uci_id	uci;
@@ -54,7 +54,6 @@ enum wb_reason {
 	WB_REASON_SYNC,
 	WB_REASON_PERIODIC,
 	WB_REASON_LAPTOP_TIMER,
-	WB_REASON_FREE_MORE_MEM,
 	WB_REASON_FS_FREE_SPACE,
 	/*
 	 * There is no bdi forker thread any more and works are done
@@ -220,6 +219,7 @@ struct backing_dev_info {
 	wait_queue_head_t wb_waitq;
 
 	struct device *dev;
+	char dev_name[64];
 	struct device *owner;
 
 	struct timer_list laptop_mode_wb_timer;
@@ -505,13 +505,6 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
				  (1 << WB_async_congested));
 }
 
-extern const char *bdi_unknown_name;
-
-static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
-{
-	if (!bdi || !bdi->dev)
-		return bdi_unknown_name;
-	return dev_name(bdi->dev);
-}
+const char *bdi_dev_name(struct backing_dev_info *bdi);
 
 #endif /* _LINUX_BACKING_DEV_H */
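Usage note: bdi_dev_name() keeps the same contract now that it is out of line, so call sites need no change. A minimal sketch (inode_to_bdi() as the usual way to reach the bdi):

    const char *name = bdi_dev_name(inode_to_bdi(inode));

    pr_debug("writeback device: %s\n", name);

If the bdi or its device is already gone, the helper returns the static "(unknown)" name instead of a dangling pointer.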
@@ -245,6 +245,7 @@
 #define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN	(1 << 0)
 #define BCM54810_SHD_CLK_CTL			0x3
 #define BCM54810_SHD_CLK_CTL_GTXCLK_EN		(1 << 9)
+#define BCM54810_SHD_SCR3_TRDDAPD		0x0100
 
 /* BCM54612E Registers */
 #define BCM54612E_EXP_SPARE0		(MII_BCM54XX_EXP_SEL_ETC + 0x34)
@@ -356,4 +356,10 @@ static inline void *offset_to_ptr(const int *off)
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 
+/*
+ * This is needed in functions which generate the stack canary, see
+ * arch/x86/kernel/smpboot.c::start_secondary() for an example.
+ */
+#define prevent_tail_call_optimization()	mb()
+
 #endif /* __LINUX_COMPILER_H */
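The new helper is in effect a compiler fence: the mb() keeps the compiler from turning the last call of a function into a tail call, which would release the stack frame early. A sketch of the pattern the comment refers to (do_rest_of_boot() is a hypothetical stand-in for the remaining start_secondary() work):

    static void start_secondary_like(void)
    {
        boot_init_stack_canary();   /* canary lives in this frame */
        do_rest_of_boot();          /* hypothetical remaining work */
        /* without this, the call above could become a tail call and
         * the frame holding the canary would be torn down too early */
        prevent_tail_call_optimization();
    }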
@@ -521,6 +521,15 @@ struct cper_sec_pcie {
 	u8	aer_info[96];
 };
 
+/* Firmware Error Record Reference, UEFI v2.7 sec N.2.10 */
+struct cper_sec_fw_err_rec_ref {
+	u8 record_type;
+	u8 revision;
+	u8 reserved[6];
+	u64 record_identifier;
+	guid_t record_identifier_guid;
+};
+
 /* Reset to default packing */
 #pragma pack()
 
@@ -103,8 +103,8 @@ void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
		       u8 *value);
 void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent,
			u16 *value);
-struct dentry *debugfs_create_u32(const char *name, umode_t mode,
-				  struct dentry *parent, u32 *value);
+void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent,
+			u32 *value);
 void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
			u64 *value);
 struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
@@ -250,12 +250,8 @@ static inline void debugfs_create_u8(const char *name, umode_t mode,
 static inline void debugfs_create_u16(const char *name, umode_t mode,
				      struct dentry *parent, u16 *value) { }
 
-static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode,
-						struct dentry *parent,
-						u32 *value)
-{
-	return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u32(const char *name, umode_t mode,
+				      struct dentry *parent, u32 *value) { }
 
 static inline void debugfs_create_u64(const char *name, umode_t mode,
				      struct dentry *parent, u64 *value) { }
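With the u32 helper now returning void like its u8/u16/u64 siblings, callers simply stop checking the return value; debugfs failures are deliberately not propagated. Illustrative use (names are examples):

    static u32 poll_interval;

    debugfs_create_u32("poll_interval", 0644, parent_dir, &poll_interval);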
@@ -329,13 +329,12 @@ struct dma_buf {
 
 /**
  * struct dma_buf_attach_ops - importer operations for an attachment
+ * @move_notify: [optional] notification that the DMA-buf is moving
  *
  * Attachment operations implemented by the importer.
  */
 struct dma_buf_attach_ops {
	/**
-	 * @move_notify
-	 *
+	 * @move_notify: [optional] notification that the DMA-buf is moving
	 *
	 * If this callback is provided the framework can avoid pinning the
	 * backing store while mappings exists.
@@ -83,9 +83,9 @@ enum dma_transfer_direction {
 /**
  * Interleaved Transfer Request
  * ----------------------------
- * A chunk is collection of contiguous bytes to be transfered.
+ * A chunk is collection of contiguous bytes to be transferred.
  * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
- * ICGs may or maynot change between chunks.
+ * ICGs may or may not change between chunks.
  * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
  * that when repeated an integral number of times, specifies the transfer.
  * A transfer template is specification of a Frame, the number of times
@@ -341,13 +341,11 @@ struct dma_chan {
  * @chan: driver channel device
  * @device: sysfs device
  * @dev_id: parent dma_device dev_id
- * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
-	atomic_t *idr_ref;
 };
 
 /**
@@ -835,6 +833,8 @@ struct dma_device {
	int dev_id;
	struct device *dev;
	struct module *owner;
+	struct ida chan_ida;
+	struct mutex chan_mutex;	/* to protect chan_ida */
 
	u32 src_addr_widths;
	u32 dst_addr_widths;
@@ -1069,7 +1069,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
 * dmaengine_synchronize() needs to be called before it is safe to free
 * any memory that is accessed by previously submitted descriptors or before
 * freeing any resources accessed from within the completion callback of any
- * perviously submitted descriptors.
+ * previously submitted descriptors.
 *
 * This function can be called from atomic context as well as from within a
 * complete callback of a descriptor submitted on the same channel.
@@ -1091,7 +1091,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan)
 *
 * Synchronizes to the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory assoicated
+ * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe to
 * free resources accessed from within the complete callbacks.
@@ -1245,4 +1245,6 @@ struct linux_efi_memreserve {
 
 void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
 
+char *efi_systab_show_arch(char *str);
+
 #endif /* _LINUX_EFI_H */
@@ -983,7 +983,7 @@ struct file_handle {
	__u32 handle_bytes;
	int handle_type;
	/* file identifier */
-	unsigned char f_handle[0];
+	unsigned char f_handle[];
 };
 
 static inline struct file *get_file(struct file *f)
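f_handle is now a C99 flexible array member instead of the GNU [0] extension, so allocations can use struct_size() and the compiler's bounds machinery sees real limits. An illustrative allocation:

    struct file_handle *fh;

    fh = kmalloc(struct_size(fh, f_handle, MAX_HANDLE_SZ), GFP_KERNEL);
    if (!fh)
        return -ENOMEM;
    fh->handle_bytes = MAX_HANDLE_SZ;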
@@ -210,6 +210,29 @@ struct ftrace_ops {
 #endif
 };
 
+extern struct ftrace_ops __rcu *ftrace_ops_list;
+extern struct ftrace_ops ftrace_list_end;
+
+/*
+ * Traverse the ftrace_global_list, invoking all entries. The reason that we
+ * can use rcu_dereference_raw_check() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism. The rcu_dereference_raw_check() calls are needed to handle
+ * concurrent insertions into the ftrace_global_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
+#define do_for_each_ftrace_op(op, list)			\
+	op = rcu_dereference_raw_check(list);			\
+	do
+
+/*
+ * Optimized for just a single item in the list (as that is the normal case).
+ */
+#define while_for_each_ftrace_op(op)				\
+	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
+	       unlikely((op) != &ftrace_list_end))
+
 /*
  * Type of the current tracing.
  */
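The two macros wrap a do/while pair; a sketch of how a traversal reads at a call site (the loop body is illustrative):

    struct ftrace_ops *op;

    do_for_each_ftrace_op(op, ftrace_ops_list) {
        pr_info("ftrace ops: %ps\n", op->func);
    } while_for_each_ftrace_op(op);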
@@ -17,9 +17,12 @@ enum host1x_class {
	HOST1X_CLASS_GR3D = 0x60,
 };
 
 struct host1x;
 struct host1x_client;
+struct iommu_group;
 
+u64 host1x_get_dma_mask(struct host1x *host1x);
+
 /**
  * struct host1x_client_ops - host1x client operations
  * @init: host1x client initialization code
@@ -29,7 +29,7 @@ struct i2c_mux_core {
 
	int num_adapters;
	int max_adapters;
-	struct i2c_adapter *adapter[0];
+	struct i2c_adapter *adapter[];
 };
 
 struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
@@ -2,7 +2,7 @@
 /*
  * i2c.h - definitions for the Linux i2c bus interface
  * Copyright (C) 1995-2000 Simon G. Vogl
- * Copyright (C) 2013-2019 Wolfram Sang <wsa@the-dreams.de>
+ * Copyright (C) 2013-2019 Wolfram Sang <wsa@kernel.org>
  *
  * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
  * Frodo Looijaard <frodol@dds.nl>
@@ -600,7 +600,7 @@ void iio_device_unregister(struct iio_dev *indio_dev);
  * 0 on success, negative error number on failure.
  */
 #define devm_iio_device_register(dev, indio_dev) \
-	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE);
+	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
 int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod);
 void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
@@ -813,8 +813,11 @@ void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 
 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+				 struct kvm_vcpu *except,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
+				      struct kvm_vcpu *except);
 bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
				unsigned long *vcpu_bitmap);
 
@@ -1048,7 +1051,7 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn)
			start = slot + 1;
	}
 
-	if (gfn >= memslots[start].base_gfn &&
+	if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
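The search_memslots() guard matters when no slot can match (e.g. an empty memslot array): the binary search can leave start pointing one past the used entries, and the old code dereferenced memslots[start] anyway. The new request helper reads like this at a call site (request and vcpu are illustrative):

    kvm_make_all_cpus_request_except(kvm, KVM_REQ_TLB_FLUSH, skip_vcpu);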
@@ -55,7 +55,7 @@ LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
 LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
 LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
	 struct fs_context *src_sc)
-LSM_HOOK(int, 0, fs_context_parse_param, struct fs_context *fc,
+LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc,
	 struct fs_parameter *param)
 LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb)
 LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb)
@@ -243,7 +243,7 @@ LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, char *name,
	 char **value)
 LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
 LSM_HOOK(int, 0, ismaclabel, const char *name)
-LSM_HOOK(int, 0, secid_to_secctx, u32 secid, char **secdata,
+LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, char **secdata,
	 u32 *seclen)
 LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid)
 LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
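The LSM_HOOK default is what a call site sees when no security module implements the hook, so it must be a semantically neutral value. For fs_context_parse_param, 0 meant "parameter consumed" and silently ate mount options; -ENOPARAM correctly means "not mine". A sketch of the calling pattern:

    rc = security_fs_context_parse_param(fc, param);
    if (rc != -ENOPARAM)
        return rc;  /* an LSM consumed or rejected the parameter */
    /* otherwise fall through to filesystem-specific parsing */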
@@ -783,6 +783,8 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg,
		atomic_long_inc(&memcg->memory_events[event]);
		cgroup_file_notify(&memcg->events_file);
 
+		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
@@ -53,9 +53,9 @@ enum mhi_callback {
  * @MHI_CHAIN: Linked transfer
  */
 enum mhi_flags {
-	MHI_EOB,
-	MHI_EOT,
-	MHI_CHAIN,
+	MHI_EOB = BIT(0),
+	MHI_EOT = BIT(1),
+	MHI_CHAIN = BIT(2),
 };
 
 /**
@@ -335,14 +335,15 @@ struct mhi_controller_config {
  * @syserr_worker: System error worker
  * @state_event: State change event
  * @status_cb: CB function to notify power states of the device (required)
- * @link_status: CB function to query link status of the device (required)
  * @wake_get: CB function to assert device wake (optional)
  * @wake_put: CB function to de-assert device wake (optional)
  * @wake_toggle: CB function to assert and de-assert device wake (optional)
  * @runtime_get: CB function to controller runtime resume (required)
- * @runtimet_put: CB function to decrement pm usage (required)
+ * @runtime_put: CB function to decrement pm usage (required)
  * @map_single: CB function to create TRE buffer
  * @unmap_single: CB function to destroy TRE buffer
+ * @read_reg: Read a MHI register via the physical link (required)
+ * @write_reg: Write a MHI register via the physical link (required)
  * @buffer_len: Bounce buffer length
  * @bounce_buf: Use of bounce buffer
  * @fbc_download: MHI host needs to do complete image transfer (optional)
@@ -417,7 +418,6 @@ struct mhi_controller {
 
	void (*status_cb)(struct mhi_controller *mhi_cntrl,
			  enum mhi_callback cb);
-	int (*link_status)(struct mhi_controller *mhi_cntrl);
	void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
	void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
	void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
@@ -427,6 +427,10 @@ struct mhi_controller {
			  struct mhi_buf_info *buf);
	void (*unmap_single)(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf);
+	int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
+			u32 *out);
+	void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
+			  u32 val);
 
	size_t buffer_len;
	bool bounce_buf;
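Moving enum mhi_flags to BIT() values makes the flags a combinable mask; with the old 0/1/2 numbering, MHI_EOB was 0 and silently disappeared under bitwise OR. For example:

    enum mhi_flags flags = MHI_EOT | MHI_CHAIN;  /* now a meaningful mask */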
@@ -213,6 +213,12 @@ enum mlx5_port_status {
	MLX5_PORT_DOWN          = 2,
 };
 
+enum mlx5_cmdif_state {
+	MLX5_CMDIF_STATE_UNINITIALIZED,
+	MLX5_CMDIF_STATE_UP,
+	MLX5_CMDIF_STATE_DOWN,
+};
+
 struct mlx5_cmd_first {
	__be32		data[4];
 };
@@ -258,6 +264,7 @@ struct mlx5_cmd_stats {
 struct mlx5_cmd {
	struct mlx5_nb    nb;
 
+	enum mlx5_cmdif_state	state;
	void	       *cmd_alloc_buf;
	dma_addr_t	alloc_dma;
	int		alloc_size;
@@ -284,6 +291,7 @@ struct mlx5_cmd {
	struct semaphore sem;
	struct semaphore pages_sem;
	int	mode;
+	u16     allowed_opcode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
@@ -743,6 +751,7 @@ struct mlx5_cmd_work_ent {
	struct delayed_work	cb_timeout_work;
	void		       *context;
	int			idx;
+	struct completion	handling;
	struct completion	done;
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
@@ -874,10 +883,17 @@ mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
 }
 
+enum {
+	CMD_ALLOWED_OPCODE_ALL,
+};
+
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
+void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
+			enum mlx5_cmdif_state cmdif_state);
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
+void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
 
 struct mlx5_async_ctx {
	struct mlx5_core_dev *dev;
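A hedged sketch of how the new command-interface state and opcode filter fit together during recovery (ordering assumed from the declarations, not taken from driver code):

    /* fail new commands immediately while the device is torn down */
    mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
    /* ... teardown / recovery work ... */
    mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
    mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);  /* lift any filter */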
@@ -1317,11 +1317,13 @@ struct nfs41_impl_id {
	struct nfstime4 date;
 };
 
+#define MAX_BIND_CONN_TO_SESSION_RETRIES 3
 struct nfs41_bind_conn_to_session_args {
	struct nfs_client	*client;
	struct nfs4_sessionid	sessionid;
	u32			dir;
	bool			use_conn_in_rdma_mode;
+	int			retries;
 };
 
 struct nfs41_bind_conn_to_session_res {
@@ -185,6 +185,7 @@ int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub,
 void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub,
					    u8 sensor_num);
 
+int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub);
 int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub);
 void cros_ec_sensorhub_ring_remove(void *arg);
 int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub,
@@ -25,6 +25,7 @@ struct platform_device {
	bool		id_auto;
	struct device	dev;
	u64		platform_dma_mask;
+	struct device_dma_parameters dma_parms;
	u32		num_resources;
	struct resource	*resource;
 
@@ -220,10 +220,8 @@ struct pnp_card {
 #define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list)
 #define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list)
 #define to_pnp_card(n) container_of(n, struct pnp_card, dev)
-#define pnp_for_each_card(card) \
-	for((card) = global_to_pnp_card(pnp_cards.next); \
-	    (card) != global_to_pnp_card(&pnp_cards); \
-	    (card) = global_to_pnp_card((card)->global_list.next))
+#define pnp_for_each_card(card) \
+	list_for_each_entry(card, &pnp_cards, global_list)
 
 struct pnp_card_link {
	struct pnp_card *card;
@@ -276,14 +274,9 @@ struct pnp_dev {
 #define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list)
 #define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list)
 #define	to_pnp_dev(n) container_of(n, struct pnp_dev, dev)
-#define pnp_for_each_dev(dev) \
-	for((dev) = global_to_pnp_dev(pnp_global.next); \
-	    (dev) != global_to_pnp_dev(&pnp_global); \
-	    (dev) = global_to_pnp_dev((dev)->global_list.next))
-#define card_for_each_dev(card,dev) \
-	for((dev) = card_to_pnp_dev((card)->devices.next); \
-	    (dev) != card_to_pnp_dev(&(card)->devices); \
-	    (dev) = card_to_pnp_dev((dev)->card_list.next))
+#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, global_list)
+#define card_for_each_dev(card, dev) \
+	list_for_each_entry(dev, &(card)->devices, card_list)
 #define pnp_dev_name(dev) (dev)->name
 
 static inline void *pnp_get_drvdata(struct pnp_dev *pdev)
@@ -437,14 +430,10 @@ struct pnp_protocol {
 };
 
 #define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list)
-#define protocol_for_each_card(protocol,card) \
-	for((card) = protocol_to_pnp_card((protocol)->cards.next); \
-	    (card) != protocol_to_pnp_card(&(protocol)->cards); \
-	    (card) = protocol_to_pnp_card((card)->protocol_list.next))
-#define protocol_for_each_dev(protocol,dev) \
-	for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \
-	    (dev) != protocol_to_pnp_dev(&(protocol)->devices); \
-	    (dev) = protocol_to_pnp_dev((dev)->protocol_list.next))
+#define protocol_for_each_card(protocol, card) \
+	list_for_each_entry(card, &(protocol)->cards, protocol_list)
+#define protocol_for_each_dev(protocol, dev) \
+	list_for_each_entry(dev, &(protocol)->devices, protocol_list)
 
 extern struct bus_type pnp_bus_type;
 
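The list_for_each_entry() forms are behaviorally identical to the removed open-coded loops; typical use:

    struct pnp_dev *dev;

    pnp_for_each_dev(dev)
        dev_info(&dev->dev, "%s\n", pnp_dev_name(dev));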
@@ -105,10 +105,10 @@ struct ptp_system_timestamp {
  *             parameter func: the desired function to use.
  *             parameter chan: the function channel index to use.
  *
- * @do_work:  Request driver to perform auxiliary (periodic) operations
- *            Driver should return delay of the next auxiliary work scheduling
- *            time (>=0) or negative value in case further scheduling
- *            is not required.
+ * @do_aux_work:  Request driver to perform auxiliary (periodic) operations
+ *                Driver should return delay of the next auxiliary work
+ *                scheduling time (>=0) or negative value in case further
+ *                scheduling is not required.
  *
  * Drivers should embed their ptp_clock_info within a private
  * structure, obtaining a reference to it using container_of().
@@ -187,6 +187,7 @@ static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
	dst->sg.data[which] = src->sg.data[which];
+	dst->sg.data[which].length = size;
	dst->sg.size += size;
	src->sg.size -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
 }
@@ -177,6 +177,8 @@ struct plat_stmmacenet_data {
	struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
	struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
	void (*fix_mac_speed)(void *priv, unsigned int speed);
+	int (*serdes_powerup)(struct net_device *ndev, void *priv);
+	void (*serdes_powerdown)(struct net_device *ndev, void *priv);
	int (*init)(struct platform_device *pdev, void *priv);
	void (*exit)(struct platform_device *pdev, void *priv);
	struct mac_device_info *(*setup)(void *priv);
@@ -71,7 +71,13 @@ struct rpc_clnt {
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct dentry		*cl_debugfs;	/* debugfs directory */
 #endif
-	struct rpc_xprt_iter	cl_xpi;
+	/* cl_work is only needed after cl_xpi is no longer used,
+	 * and that are of similar size
+	 */
+	union {
+		struct rpc_xprt_iter	cl_xpi;
+		struct work_struct	cl_work;
+	};
	const struct cred	*cl_cred;
 };
 
@@ -236,4 +242,9 @@ static inline int rpc_reply_expected(struct rpc_task *task)
		(task->tk_msg.rpc_proc->p_decode != NULL);
 }
 
+static inline void rpc_task_close_connection(struct rpc_task *task)
+{
+	if (task->tk_xprt)
+		xprt_force_disconnect(task->tk_xprt);
+}
 #endif /* _LINUX_SUNRPC_CLNT_H */
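The union is safe because cl_xpi is only used while the client is live and cl_work only during release, so the two never coexist. A sketch of the deferred-free pattern this enables (rpc_free_client_work as the assumed work handler):

    INIT_WORK(&clnt->cl_work, rpc_free_client_work);
    schedule_work(&clnt->cl_work);  /* cl_xpi's storage is reused here */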
@@ -21,6 +21,7 @@
 struct gss_ctx {
	struct gss_api_mech	*mech_type;
	void			*internal_ctx_id;
+	unsigned int		slack, align;
 };
 
 #define GSS_C_NO_BUFFER		((struct xdr_netobj) 0)
@@ -66,6 +67,7 @@ u32 gss_wrap(
 u32 gss_unwrap(
		struct gss_ctx		*ctx_id,
		int			offset,
+		int			len,
		struct xdr_buf	*inbuf);
 u32 gss_delete_sec_context(
		struct gss_ctx	**ctx_id);
@@ -126,6 +128,7 @@ struct gss_api_ops {
	u32 (*gss_unwrap)(
			struct gss_ctx		*ctx_id,
			int			offset,
+			int			len,
			struct xdr_buf		*buf);
	void (*gss_delete_sec_context)(
			void			*internal_ctx_id);
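The new len argument tells gss_unwrap() how much of the xdr_buf holds the wrap token, and the new slack/align fields let the mechanism report how much a buffer may shrink during unwrap. Callers now pass the length explicitly (illustrative):

    maj_stat = gss_unwrap(ctx, 0, buf->len, buf);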
@@ -83,7 +83,7 @@ struct gss_krb5_enctype {
	u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
			   struct xdr_buf *buf,
			   struct page **pages); /* v2 encryption function */
-	u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset,
+	u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len,
			   struct xdr_buf *buf, u32 *headskip,
			   u32 *tailskip);	/* v2 decryption function */
 };
@@ -255,7 +255,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
		  struct xdr_buf *outbuf, struct page **pages);
 
 u32
-gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
+gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len,
		    struct xdr_buf *buf);
 
 
@@ -312,7 +312,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct page **pages);
 
 u32
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
		     struct xdr_buf *buf, u32 *plainoffset,
		     u32 *plainlen);
 
@@ -170,6 +170,7 @@ extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
 extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
				   struct svc_rdma_recv_ctxt *ctxt);
+extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
 extern void svc_rdma_release_rqst(struct svc_rqst *rqstp);
 extern int svc_rdma_recvfrom(struct svc_rqst *);
 
 /* svc_rdma_rw.c */
@@ -184,6 +184,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
 extern void xdr_shift_buf(struct xdr_buf *, size_t);
 extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
 extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
+extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
 extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
 extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
 
@@ -78,47 +78,6 @@ struct tcp_sack_block {
 #define TCP_SACK_SEEN     (1 << 0)   /*1 = peer is SACK capable, */
 #define TCP_DSACK_SEEN    (1 << 2)   /*1 = DSACK was received from peer*/
 
-#if IS_ENABLED(CONFIG_MPTCP)
-struct mptcp_options_received {
-	u64	sndr_key;
-	u64	rcvr_key;
-	u64	data_ack;
-	u64	data_seq;
-	u32	subflow_seq;
-	u16	data_len;
-	u16	mp_capable : 1,
-		mp_join : 1,
-		dss : 1,
-		add_addr : 1,
-		rm_addr : 1,
-		family : 4,
-		echo : 1,
-		backup : 1;
-	u32	token;
-	u32	nonce;
-	u64	thmac;
-	u8	hmac[20];
-	u8	join_id;
-	u8	use_map:1,
-		dsn64:1,
-		data_fin:1,
-		use_ack:1,
-		ack64:1,
-		mpc_map:1,
-		__unused:2;
-	u8	addr_id;
-	u8	rm_id;
-	union {
-		struct in_addr	addr;
-#if IS_ENABLED(CONFIG_MPTCP_IPV6)
-		struct in6_addr	addr6;
-#endif
-	};
-	u64	ahmac;
-	u16	port;
-};
-#endif
-
 struct tcp_options_received {
 /*	PAWS/RTTM data	*/
	int	ts_recent_stamp;/* Time we stored ts_recent (for aging) */
@@ -136,9 +95,6 @@ struct tcp_options_received {
	u8	num_sacks;	/* Number of SACK blocks		*/
	u16	user_mss;	/* mss requested by user in ioctl	*/
	u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
-#if IS_ENABLED(CONFIG_MPTCP)
-	struct mptcp_options_received	mptcp;
-#endif
 };
 
 static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
@@ -148,13 +104,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
 #if IS_ENABLED(CONFIG_SMC)
	rx_opt->smc_ok = 0;
 #endif
-#if IS_ENABLED(CONFIG_MPTCP)
-	rx_opt->mptcp.mp_capable = 0;
-	rx_opt->mptcp.mp_join = 0;
-	rx_opt->mptcp.add_addr = 0;
-	rx_opt->mptcp.rm_addr = 0;
-	rx_opt->mptcp.dss = 0;
-#endif
 }
 
 /* This is the max number of SACKS that we'll generate and process. It's safe
@@ -156,8 +156,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 * Note, the proto and args passed in includes "__data" as the first parameter.
 * The reason for this is to handle the "void" prototype. If a tracepoint
 * has a "void" prototype, then it is invalid to declare a function
- * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
- * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
+ * as "(void *, void)".
 */
 #define __DO_TRACE(tp, proto, args, cond, rcuidle)			\
	do {								\
@@ -373,25 +372,6 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 # define __tracepoint_string
 #endif
 
-/*
- * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype
- * (void). "void" is a special value in a function prototype and can
- * not be combined with other arguments. Since the DECLARE_TRACE()
- * macro adds a data element at the beginning of the prototype,
- * we need a way to differentiate "(void *data, proto)" from
- * "(void *data, void)". The second prototype is invalid.
- *
- * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype
- * and "void *__data" as the callback prototype.
- *
- * DECLARE_TRACE() passes "proto" as the tracepoint protoype and
- * "void *__data, proto" as the callback prototype.
- */
-#define DECLARE_TRACE_NOARGS(name)					\
-	__DECLARE_TRACE(name, void, ,					\
-			cpu_online(raw_smp_processor_id()),		\
-			void *__data, __data)
-
 #define DECLARE_TRACE(name, proto, args)				\
	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
			cpu_online(raw_smp_processor_id()),		\
@@ -66,7 +66,7 @@ struct tty_buffer {
	int read;
	int flags;
	/* Data points here */
-	unsigned long data[0];
+	unsigned long data[];
 };
 
 /* Values for .flags field of tty_buffer */
@@ -164,7 +164,7 @@ struct vdpa_config_ops {
	u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx);
 
	/* Device ops */
-	u16 (*get_vq_align)(struct vdpa_device *vdev);
+	u32 (*get_vq_align)(struct vdpa_device *vdev);
	u64 (*get_features)(struct vdpa_device *vdev);
	int (*set_features)(struct vdpa_device *vdev, u64 features);
	void (*set_config_cb)(struct vdpa_device *vdev,
@@ -1,5 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VERMAGIC_H
+#define _LINUX_VERMAGIC_H
+
 #include <generated/utsrelease.h>
+#include <asm/vermagic.h>
 
 /* Simply sanity version stamp for modules. */
 #ifdef CONFIG_SMP
@@ -24,9 +28,6 @@
 #else
 #define MODULE_VERMAGIC_MODVERSIONS ""
 #endif
-#ifndef MODULE_ARCH_VERMAGIC
-#define MODULE_ARCH_VERMAGIC ""
-#endif
 #ifdef RANDSTRUCT_PLUGIN
 #include <generated/randomize_layout_hash.h>
 #define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
@@ -41,3 +42,4 @@
	MODULE_ARCH_VERMAGIC						\
	MODULE_RANDSTRUCT_PLUGIN
 
+#endif /* _LINUX_VERMAGIC_H */
@@ -9,7 +9,6 @@
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 #include <linux/gfp.h>
-#include <linux/vringh.h>
 
 /**
  * virtqueue - a queue to register buffers for sending or receiving.
@@ -3,6 +3,8 @@
 #define _LINUX_VIRTIO_NET_H
 
 #include <linux/if_vlan.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/udp.h>
 #include <uapi/linux/virtio_net.h>
 
 static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
@@ -28,17 +30,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
					bool little_endian)
 {
	unsigned int gso_type = 0;
	unsigned int thlen = 0;
+	unsigned int ip_proto;
 
	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
+			ip_proto = IPPROTO_TCP;
			thlen = sizeof(struct tcphdr);
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
+			ip_proto = IPPROTO_TCP;
			thlen = sizeof(struct tcphdr);
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			gso_type = SKB_GSO_UDP;
+			ip_proto = IPPROTO_UDP;
+			thlen = sizeof(struct udphdr);
			break;
		default:
			return -EINVAL;
@@ -57,16 +67,22 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 
		if (!skb_partial_csum_set(skb, start, off))
			return -EINVAL;
+
+		if (skb_transport_offset(skb) + thlen > skb_headlen(skb))
+			return -EINVAL;
	} else {
		/* gso packets without NEEDS_CSUM do not set transport_offset.
		 * probe and drop if does not match one of the above types.
		 */
		if (gso_type && skb->network_header) {
+			struct flow_keys_basic keys;
+
			if (!skb->protocol)
				virtio_net_hdr_set_proto(skb, hdr);
 retry:
-			skb_probe_transport_header(skb);
-			if (!skb_transport_header_was_set(skb)) {
+			if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
+							      NULL, 0, 0, 0,
+							      0)) {
				/* UFO does not specify ipv4 or 6: try both */
				if (gso_type & SKB_GSO_UDP &&
				    skb->protocol == htons(ETH_P_IP)) {
@@ -75,6 +91,12 @@ retry:
				}
				return -EINVAL;
			}
+
+			if (keys.control.thoff + thlen > skb_headlen(skb) ||
+			    keys.basic.ip_proto != ip_proto)
+				return -EINVAL;
+
+			skb_set_transport_header(skb, keys.control.thoff);
		}
	}
 
@@ -48,6 +48,7 @@ struct virtio_vsock_pkt {
	u32 len;
	u32 off;
	bool reply;
+	bool tap_delivered;
 };
 
 struct virtio_vsock_pkt_info {
@@ -137,7 +137,7 @@ extern void vunmap(const void *addr);
 
 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
-				       unsigned long size);
+				       unsigned long pgoff, unsigned long size);
 
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
@@ -14,8 +14,10 @@
 #include <linux/virtio_byteorder.h>
 #include <linux/uio.h>
 #include <linux/slab.h>
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
 #include <linux/dma-direction.h>
 #include <linux/vhost_iotlb.h>
+#endif
 #include <asm/barrier.h>
 
 /* virtio_ring with information needed for host access. */
@@ -254,6 +256,8 @@ static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
	return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
 }
 
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
+
 void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb);
 
 int vringh_init_iotlb(struct vringh *vrh, u64 features,
@@ -284,4 +288,6 @@ void vringh_notify_disable_iotlb(struct vringh *vrh);
 
 int vringh_need_notify_iotlb(struct vringh *vrh);
 
+#endif /* CONFIG_VHOST_IOTLB */
+
 #endif /* _LINUX_VRINGH_H */
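IS_REACHABLE(CONFIG_VHOST_IOTLB) is true when the IOTLB code is built-in, or is a module that the calling code (also modular) can reach, so vringh keeps building when VHOST_IOTLB=m is unreachable from built-in users. Consumers mirror the guard (illustrative):

    #if IS_REACHABLE(CONFIG_VHOST_IOTLB)
        vringh_set_iotlb(&vrh, iotlb);
    #endif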