Merge 4.12-rc2 into staging-next
We want the staging tree fixes in here as well to handle the merge issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -40,6 +40,9 @@ struct bpf_reg_state {
	 */
	s64 min_value;
	u64 max_value;
	u32 min_align;
	u32 aux_off;
	u32 aux_off_align;
};

enum bpf_stack_slot_type {
@@ -87,6 +90,7 @@ struct bpf_verifier_env {
	struct bpf_prog *prog; /* eBPF program being verified */
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size; /* number of states to be processed */
	bool strict_alignment; /* perform strict pointer alignment checks */
	struct bpf_verifier_state cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */

@@ -18,20 +18,6 @@ struct dax_operations {
			void **, pfn_t *);
};

int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
int __bdev_dax_supported(struct super_block *sb, int blocksize);
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
{
	return __bdev_dax_supported(sb, blocksize);
}
#else
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
{
	return -EOPNOTSUPP;
}
#endif

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
void put_dax(struct dax_device *dax_dev);
@@ -46,6 +32,40 @@ static inline void put_dax(struct dax_device *dax_dev)
}
#endif

int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
int __bdev_dax_supported(struct super_block *sb, int blocksize);
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
{
	return __bdev_dax_supported(sb, blocksize);
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return dax_get_by_host(host);
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}

#else
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
{
	return -EOPNOTSUPP;
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return NULL;
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}
#endif

int dax_read_lock(void);
void dax_read_unlock(int id);
struct dax_device *alloc_dax(void *private, const char *host,

@@ -349,6 +349,9 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
					     int write, void __user *buffer,
					     size_t *length, loff_t *ppos);
#endif
extern void wait_for_kprobe_optimizer(void);
#else
static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */
#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,

@@ -109,7 +109,6 @@ struct mlx5_flow_table_attr {
	int max_fte;
	u32 level;
	u32 flags;
	u32 underlay_qpn;
};

struct mlx5_flow_table *
@@ -167,4 +166,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);

#endif

@@ -3296,11 +3296,15 @@ int dev_get_phys_port_id(struct net_device *dev,
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, u32 flags);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
				    struct netdev_queue *txq, int *ret);

typedef int (*xdp_op_t)(struct net_device *dev, struct netdev_xdp *xdp);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, u32 flags);
bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,

@@ -27,8 +27,8 @@

/* FC Port role bitmask - can merge with FC Port Roles in fc transport */
#define FC_PORT_ROLE_NVME_INITIATOR 0x10
#define FC_PORT_ROLE_NVME_TARGET 0x11
#define FC_PORT_ROLE_NVME_DISCOVERY 0x12
#define FC_PORT_ROLE_NVME_TARGET 0x20
#define FC_PORT_ROLE_NVME_DISCOVERY 0x40


/**
@@ -642,15 +642,7 @@ enum {
	 * sequence in one LLDD operation. Errors during Data
	 * sequence transmit must not allow RSP sequence to be sent.
	 */
	NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1),
	/* Bit 1: When 0, the LLDD will deliver FCP CMD
	 * on the CPU it should be affinitized to. Thus work will
	 * be scheduled on the cpu received on. When 1, the LLDD
	 * may not deliver the CMD on the CPU it should be worked
	 * on. The transport should pick a cpu to schedule the work
	 * on.
	 */
	NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 2),
	NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1),
	/* Bit 2: When 0, the LLDD is calling the cmd rcv handler
	 * in a non-isr context, allowing the transport to finish
	 * op completion in the calling context. When 1, the LLDD
@@ -658,7 +650,7 @@ enum {
	 * requiring the transport to transition to a workqueue
	 * for op completion.
	 */
	NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 3),
	NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2),
	/* Bit 3: When 0, the LLDD is calling the op done handler
	 * in a non-isr context, allowing the transport to finish
	 * op completion in the calling context. When 1, the LLDD

@@ -8,7 +8,7 @@
#include <linux/ioport.h>
#include <linux/of.h>

typedef int const (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);

/*
 * Workarounds only applied to 32bit powermac machines

@@ -1,6 +1,11 @@
#ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__
#define __LINUX_SOC_RENESAS_RCAR_RST_H__

#if defined(CONFIG_ARCH_RCAR_GEN1) || defined(CONFIG_ARCH_RCAR_GEN2) || \
    defined(CONFIG_ARCH_R8A7795) || defined(CONFIG_ARCH_R8A7796)
int rcar_rst_read_mode_pins(u32 *mode);
#else
static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; }
#endif

#endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */

@@ -148,6 +148,7 @@ struct usb_hcd {
	unsigned rh_registered:1;/* is root hub registered? */
	unsigned rh_pollable:1; /* may we poll the root hub? */
	unsigned msix_enabled:1; /* driver has MSI-X enabled? */
	unsigned msi_enabled:1; /* driver has MSI enabled? */
	unsigned remove_phy:1; /* auto-remove USB phy */

	/* The next flag is a stopgap, to be removed when all the HCDs