Merge 4.9-rc5 into staging-next
We want the staging/iio fixes in here as well to resolve issues and merge problems.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -555,7 +555,8 @@ int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
 int acpi_device_modalias(struct device *, char *, int);
 void acpi_walk_dep_device_list(acpi_handle handle);
 
-struct platform_device *acpi_create_platform_device(struct acpi_device *);
+struct platform_device *acpi_create_platform_device(struct acpi_device *,
+						    struct property_entry *);
 #define ACPI_PTR(_ptr) (_ptr)
 
 static inline void acpi_device_set_enumerated(struct acpi_device *adev)
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -258,6 +258,8 @@ struct ceph_watch_item {
 	struct ceph_entity_addr addr;
 };
 
+#define CEPH_LINGER_ID_START	0xffff000000000000ULL
+
 struct ceph_osd_client {
 	struct ceph_client *client;
 
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -173,12 +173,6 @@ static inline void console_sysfs_notify(void)
 #endif
 extern bool console_suspend_enabled;
 
-#ifdef CONFIG_OF
-extern void console_set_by_of(void);
-#else
-static inline void console_set_by_of(void) {}
-#endif
-
 /* Suspend and resume console messages over PM events */
 extern void suspend_console(void);
 extern void resume_console(void);
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -106,8 +106,9 @@ static inline void frontswap_invalidate_area(unsigned type)
 
 static inline void frontswap_init(unsigned type, unsigned long *map)
 {
-	if (frontswap_enabled())
-		__frontswap_init(type, map);
+#ifdef CONFIG_FRONTSWAP
+	__frontswap_init(type, map);
+#endif
 }
 
 #endif /* _LINUX_FRONTSWAP_H */
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -321,6 +321,7 @@ struct writeback_control;
 #define IOCB_HIPRI	(1 << 3)
 #define IOCB_DSYNC	(1 << 4)
 #define IOCB_SYNC	(1 << 5)
+#define IOCB_WRITE	(1 << 6)
 
 struct kiocb {
 	struct file *ki_filp;
@@ -1709,7 +1710,6 @@ struct file_operations {
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*release) (struct inode *, struct file *);
 	int (*fsync) (struct file *, loff_t, loff_t, int datasync);
-	int (*aio_fsync) (struct kiocb *, int datasync);
 	int (*fasync) (int, struct file *, int);
 	int (*lock) (struct file *, int, struct file_lock *);
 	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1169,13 +1169,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
 					 const char *mod_name);
 void vmbus_driver_unregister(struct hv_driver *hv_driver);
 
-static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
-{
-	const struct kobject *kobj = &device_obj->device.kobj;
-
-	return kobj->name;
-}
-
 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
 
 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -123,12 +123,12 @@ struct inet6_skb_parm {
 };
 
 #if defined(CONFIG_NET_L3_MASTER_DEV)
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
 {
 	return flags & IP6SKB_L3SLAVE;
 }
 #else
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
 {
 	return false;
 }
@@ -139,11 +139,22 @@ static inline bool skb_l3mdev_slave(__u16 flags)
 
 static inline int inet6_iif(const struct sk_buff *skb)
 {
-	bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
+	bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags);
 
 	return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
 }
 
+/* can not be used in TCP layer after tcp_v6_fill_cb */
+static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
+	    ipv6_l3mdev_skb(IP6CB(skb)->flags))
+		return true;
+#endif
+	return false;
+}
+
 struct tcp6_request_sock {
 	struct tcp_request_sock tcp6rsk_tcp;
 };
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1399,7 +1399,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 		    u32 *lkey, u32 *rkey);
 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
-int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
+int mlx4_test_async(struct mlx4_dev *dev);
 int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
 			     const u32 offset[], u32 value[],
 			     size_t array_len, u8 port);
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -418,8 +418,12 @@ struct mlx5_core_health {
 	u32 prev;
 	int miss_counter;
 	bool sick;
+	/* wq spinlock to synchronize draining */
+	spinlock_t wq_lock;
 	struct workqueue_struct *wq;
+	unsigned long flags;
 	struct work_struct work;
+	struct delayed_work recover_work;
 };
 
 struct mlx5_cq_table {
@@ -625,10 +629,6 @@ struct mlx5_db {
 	int index;
 };
 
-enum {
-	MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
-};
-
 enum {
 	MLX5_COMP_EQ_SIZE = 1024,
 };
@@ -638,13 +638,6 @@ enum {
 	MLX5_PTYS_EN = 1 << 2,
 };
 
-struct mlx5_db_pgdir {
-	struct list_head list;
-	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
-	__be32 *db_page;
-	dma_addr_t db_dma;
-};
-
 typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
 
 struct mlx5_cmd_work_ent {
@@ -789,6 +782,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
 			struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -1184,7 +1184,7 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
 			   int page);
 
 /* Reset and initialize a NAND device */
-int nand_reset(struct nand_chip *chip);
+int nand_reset(struct nand_chip *chip, int chipnr);
 
 /* Free resources held by the NAND device */
 void nand_cleanup(struct nand_chip *chip);
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2169,7 +2169,10 @@ struct napi_gro_cb {
 	/* Used to determine if flush_id can be ignored */
 	u8 is_atomic:1;
 
-	/* 5 bit hole */
+	/* Number of gro_receive callbacks this packet already went through */
+	u8 recursion_counter:4;
+
+	/* 1 bit hole */
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum csum;
@@ -2180,6 +2183,40 @@ struct napi_gro_cb {
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+						struct sk_buff **head,
+						struct sk_buff *skb)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb);
+}
+
+typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
+					     struct sk_buff *);
+static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
+						   struct sock *sk,
+						   struct sk_buff **head,
+						   struct sk_buff *skb)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(sk, head, skb);
+}
+
 struct packet_type {
 	__be16 type;	/* This is really htons(ether_type). */
 	struct net_device *dev;	/* NULL is wildcarded here */
@@ -3877,7 +3914,7 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
 	     ldev = netdev_all_lower_get_next(dev, &(iter)))
 
 #define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
-	for (iter = (dev)->all_adj_list.lower.next, \
+	for (iter = &(dev)->all_adj_list.lower, \
 	     ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
 	     ldev; \
 	     ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -253,6 +253,13 @@ static inline int phy_set_mode(struct phy *phy, enum phy_mode mode)
 	return -ENOSYS;
 }
 
+static inline int phy_reset(struct phy *phy)
+{
+	if (!phy)
+		return 0;
+	return -ENOSYS;
+}
+
 static inline int phy_get_bus_width(struct phy *phy)
 {
 	return -ENOSYS;
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -146,6 +146,7 @@ enum qed_led_mode {
 #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
 
 #define QED_COALESCE_MAX 0xFF
+#define QED_DEFAULT_RX_USECS 12
 
 /* forward */
 struct qed_dev;
--- a/include/linux/qed/qede_roce.h
+++ b/include/linux/qed/qede_roce.h
@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);
 
 bool qede_roce_supported(struct qede_dev *dev);
 
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#if IS_ENABLED(CONFIG_QED_RDMA)
 int qede_roce_dev_add(struct qede_dev *dev);
 void qede_roce_dev_event_open(struct qede_dev *dev);
 void qede_roce_dev_event_close(struct qede_dev *dev);
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -15,6 +15,7 @@
 
 #include <linux/list.h>
 #include <linux/rbtree.h>
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/bug.h>
 #include <linux/lockdep.h>
@@ -116,22 +117,22 @@ struct reg_sequence {
 #define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
 ({ \
 	ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
-	int ret; \
+	int pollret; \
 	might_sleep_if(sleep_us); \
 	for (;;) { \
-		ret = regmap_read((map), (addr), &(val)); \
-		if (ret) \
+		pollret = regmap_read((map), (addr), &(val)); \
+		if (pollret) \
 			break; \
 		if (cond) \
 			break; \
 		if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
-			ret = regmap_read((map), (addr), &(val)); \
+			pollret = regmap_read((map), (addr), &(val)); \
 			break; \
 		} \
 		if (sleep_us) \
 			usleep_range((sleep_us >> 2) + 1, sleep_us); \
 	} \
-	ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+	pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
 })
 
 #ifdef CONFIG_REGMAP
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -936,6 +936,7 @@ struct sk_buff_fclones {
 
 /**
  * skb_fclone_busy - check if fclone is busy
  * @sk: socket
  * @skb: buffer
  *
  * Returns true if skb is a fast clone, and its clone is not freed.