Backmerge tag 'v4.9-rc4' into drm-next

Linux 4.9-rc4

This is needed for nouveau development.
@@ -326,6 +326,7 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
void acpi_pci_irq_disable (struct pci_dev *dev);

extern int ec_read(u8 addr, u8 *val);
@@ -785,7 +785,7 @@ extern struct of_device_id __clk_of_table;
 * routines, one at of_clk_init(), and one at platform device probe
 */
#define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
static void name##_of_clk_init_driver(struct device_node *np) \
static void __init name##_of_clk_init_driver(struct device_node *np) \
{ \
	of_node_clear_flag(np, OF_POPULATED); \
	fn(np); \
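As a side note, CLK_OF_DECLARE_DRIVER registers an of_clk_init()-time hook while leaving the device node available for a later platform device probe (the macro clears OF_POPULATED after running fn). A minimal sketch of how a provider might use it; the foo_* names and the compatible string are hypothetical:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of.h>

/* Hypothetical early clock setup, run from of_clk_init(). */
static void __init foo_clk_of_init(struct device_node *np)
{
	struct clk *clk;

	/* Register a fixed divide-by-2 child of a parent clock. */
	clk = clk_register_fixed_factor(NULL, "foo_div2", "foo_parent", 0, 1, 2);
	if (!IS_ERR(clk))
		of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
/* OF_POPULATED is cleared again, so a platform driver can still probe np. */
CLK_OF_DECLARE_DRIVER(foo_clk, "vendor,foo-clock", foo_clk_of_init);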
@@ -1169,13 +1169,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
				     const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);

static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
{
	const struct kobject *kobj = &device_obj->device.kobj;

	return kobj->name;
}

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
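The helper removed here simply returned the kobject name of the embedded struct device; a driver that needs that string can use the generic dev_name() accessor instead. A small sketch (the function name is made up and is not necessarily what in-tree callers were converted to):

#include <linux/device.h>
#include <linux/hyperv.h>

/* Same result as the removed vmbus_dev_name(): the device's kobject name. */
static const char *example_vmbus_dev_name(const struct hv_device *hdev)
{
	return dev_name(&hdev->device);
}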
@@ -141,4 +141,26 @@ enum {
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
void memunmap(void *addr);

/*
 * On x86 PAT systems we have memory tracking that keeps track of
 * the allowed mappings on memory ranges. This tracking works for
 * all the in-kernel mapping APIs (ioremap*), but where the user
 * wishes to map a range from a physical device into user memory
 * the tracking won't be updated. This API is to be used by
 * drivers which remap physical device pages into userspace,
 * and wants to make sure they are mapped WC and not UC.
 */
#ifndef arch_io_reserve_memtype_wc
static inline int arch_io_reserve_memtype_wc(resource_size_t base,
					     resource_size_t size)
{
	return 0;
}

static inline void arch_io_free_memtype_wc(resource_size_t base,
					   resource_size_t size)
{
}
#endif

#endif /* _LINUX_IO_H */
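These helpers are aimed at drivers that hand device memory (for example a VRAM BAR) to userspace with write-combining and want the x86 PAT tracking to stay consistent; on architectures without the hooks the stubs above make the calls no-ops. A rough usage sketch, with the BAR handling and error paths purely illustrative:

#include <linux/io.h>
#include <linux/pci.h>

/* Illustrative: reserve a WC memtype for a PCI BAR before mapping it. */
static void __iomem *example_map_bar_wc(struct pci_dev *pdev, int bar)
{
	resource_size_t base = pci_resource_start(pdev, bar);
	resource_size_t size = pci_resource_len(pdev, bar);

	if (arch_io_reserve_memtype_wc(base, size))
		return NULL;

	/* On teardown: iounmap(), then arch_io_free_memtype_wc(base, size). */
	return ioremap_wc(base, size);
}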
@@ -19,11 +19,15 @@ struct vm_fault;
#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */

/*
 * Flags for iomap mappings:
 * Flags for all iomap mappings:
 */
#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */
#define IOMAP_F_SHARED 0x02 /* block shared with another file */
#define IOMAP_F_NEW 0x04 /* blocks have been newly allocated */
#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */

/*
 * Flags that only need to be reported for IOMAP_REPORT requests:
 */
#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */
#define IOMAP_F_SHARED 0x20 /* block shared with another file */

/*
 * Magic value for blkno:
@@ -42,8 +46,9 @@ struct iomap {
/*
 * Flags for iomap_begin / iomap_end. No flag implies a read.
 */
#define IOMAP_WRITE (1 << 0)
#define IOMAP_ZERO (1 << 1)
#define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */
#define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */
#define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */

struct iomap_ops {
	/*
@@ -123,12 +123,12 @@ struct inet6_skb_parm {
};

#if defined(CONFIG_NET_L3_MASTER_DEV)
static inline bool skb_l3mdev_slave(__u16 flags)
static inline bool ipv6_l3mdev_skb(__u16 flags)
{
	return flags & IP6SKB_L3SLAVE;
}
#else
static inline bool skb_l3mdev_slave(__u16 flags)
static inline bool ipv6_l3mdev_skb(__u16 flags)
{
	return false;
}
@@ -139,11 +139,22 @@ static inline bool skb_l3mdev_slave(__u16 flags)

static inline int inet6_iif(const struct sk_buff *skb)
{
	bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
	bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags);

	return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
}

/* can not be used in TCP layer after tcp_v6_fill_cb */
static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if defined(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
	    ipv6_l3mdev_skb(IP6CB(skb)->flags))
		return true;
#endif
	return false;
}

struct tcp6_request_sock {
	struct tcp_request_sock tcp6rsk_tcp;
};
@@ -31,7 +31,6 @@
 * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
 * the last step cherry picks the 2nd arg, we get a zero.
 */
#define config_enabled(cfg) ___is_defined(cfg)
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
@@ -41,13 +40,13 @@
 * otherwise. For boolean options, this is equivalent to
 * IS_ENABLED(CONFIG_FOO).
 */
#define IS_BUILTIN(option) config_enabled(option)
#define IS_BUILTIN(option) __is_defined(option)

/*
 * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
 * otherwise.
 */
#define IS_MODULE(option) config_enabled(option##_MODULE)
#define IS_MODULE(option) __is_defined(option##_MODULE)

/*
 * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
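The trick behind __is_defined(): a set CONFIG symbol expands to 1, the __ARG_PLACEHOLDER_1 paste turns that into "0,", and the second-argument selector then picks up the trailing 1; an undefined symbol yields a single junk token, so the selector falls through to the 0. A standalone userspace sketch of the same pattern (macro bodies copied from the header; CONFIG_FOO/CONFIG_BAR are made-up symbols):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val

#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define CONFIG_FOO 1	/* behaves like a built-in option */
/* CONFIG_BAR deliberately left undefined */

int main(void)
{
	/* Prints "FOO=1 BAR=0". */
	printf("FOO=%d BAR=%d\n", __is_defined(CONFIG_FOO), __is_defined(CONFIG_BAR));
	return 0;
}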
@@ -1399,7 +1399,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
int mlx4_test_async(struct mlx4_dev *dev);
int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
			     const u32 offset[], u32 value[],
			     size_t array_len, u8 port);
@@ -418,8 +418,12 @@ struct mlx5_core_health {
	u32 prev;
	int miss_counter;
	bool sick;
	/* wq spinlock to synchronize draining */
	spinlock_t wq_lock;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct work_struct work;
	struct delayed_work recover_work;
};

struct mlx5_cq_table {
@@ -625,10 +629,6 @@ struct mlx5_db {
	int index;
};

enum {
	MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};
@@ -638,13 +638,6 @@ enum {
	MLX5_PTYS_EN = 1 << 2,
};

struct mlx5_db_pgdir {
	struct list_head list;
	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
	__be32 *db_page;
	dma_addr_t db_dma;
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

struct mlx5_cmd_work_ent {
@@ -789,6 +782,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
@@ -1271,10 +1271,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
@@ -440,33 +440,7 @@ struct zone {
	seqlock_t span_seqlock;
#endif

	/*
	 * wait_table -- the array holding the hash table
	 * wait_table_hash_nr_entries -- the size of the hash table array
	 * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t *wait_table;
	unsigned long wait_table_hash_nr_entries;
	unsigned long wait_table_bits;
	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)
@@ -546,7 +520,7 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)

static inline bool zone_is_initialized(struct zone *zone)
{
	return !!zone->wait_table;
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
@@ -1184,7 +1184,7 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
			   int page);

/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip);
int nand_reset(struct nand_chip *chip, int chipnr);

/* Free resources held by the NAND device */
void nand_cleanup(struct nand_chip *chip);
@@ -2169,7 +2169,10 @@ struct napi_gro_cb {
	/* Used to determine if flush_id can be ignored */
	u8 is_atomic:1;

	/* 5 bit hole */
	/* Number of gro_receive callbacks this packet already went through */
	u8 recursion_counter:4;

	/* 1 bit hole */

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum csum;
@@ -2180,6 +2183,40 @@ struct napi_gro_cb {

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
						struct sk_buff **head,
						struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}

typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
					     struct sk_buff *);
static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
						   struct sock *sk,
						   struct sk_buff **head,
						   struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}

struct packet_type {
	__be16 type; /* This is really htons(ether_type). */
	struct net_device *dev; /* NULL is wildcarded here */
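call_gro_receive() and call_gro_receive_sk() are the wrappers overlay and tunnel protocols are expected to go through, so a maliciously nested encapsulation stops being aggregated once recursion_counter reaches GRO_RECURSION_LIMIT instead of recursing without bound. A hedged sketch of an encapsulation gro_receive callback using the wrapper (the foo_* names are hypothetical; the pattern mirrors what drivers such as vxlan/geneve do with eth_gro_receive()):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical tunnel: after matching/pulling our own header, hand the inner
 * Ethernet frame to eth_gro_receive() via the recursion-limited wrapper. */
static struct sk_buff **foo_tunnel_gro_receive(struct sk_buff **head,
					       struct sk_buff *skb)
{
	/* ...validate and skb_gro_pull() the foo encapsulation header here... */

	return call_gro_receive(eth_gro_receive, head, skb);
}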
@@ -3877,7 +3914,7 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
	     ldev = netdev_all_lower_get_next(dev, &(iter)))

#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
	for (iter = (dev)->all_adj_list.lower.next, \
	for (iter = &(dev)->all_adj_list.lower, \
	     ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
	     ldev; \
	     ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
@@ -1257,6 +1257,7 @@ extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
@@ -146,6 +146,7 @@ enum qed_led_mode {
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))

#define QED_COALESCE_MAX 0xFF
#define QED_DEFAULT_RX_USECS 12

/* forward */
struct qed_dev;
@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);

bool qede_roce_supported(struct qede_dev *dev);

#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#if IS_ENABLED(CONFIG_QED_RDMA)
int qede_roce_dev_add(struct qede_dev *dev);
void qede_roce_dev_event_open(struct qede_dev *dev);
void qede_roce_dev_event_close(struct qede_dev *dev);
@@ -15,6 +15,7 @@

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/lockdep.h>
@@ -116,22 +117,22 @@ struct reg_sequence {
#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
({ \
	ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
	int ret; \
	int pollret; \
	might_sleep_if(sleep_us); \
	for (;;) { \
		ret = regmap_read((map), (addr), &(val)); \
		if (ret) \
		pollret = regmap_read((map), (addr), &(val)); \
		if (pollret) \
			break; \
		if (cond) \
			break; \
		if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
			ret = regmap_read((map), (addr), &(val)); \
			pollret = regmap_read((map), (addr), &(val)); \
			break; \
		} \
		if (sleep_us) \
			usleep_range((sleep_us >> 2) + 1, sleep_us); \
	} \
	ret ?: ((cond) ? 0 : -ETIMEDOUT); \
	pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
})

#ifdef CONFIG_REGMAP
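regmap_read_poll_timeout() evaluates to 0 once cond becomes true, to the regmap_read() error if a read fails, or to -ETIMEDOUT; a typical caller polls a status register until a ready bit appears. A short sketch, with the register offset and bit purely illustrative:

#include <linux/bitops.h>
#include <linux/regmap.h>

/* Illustrative: wait up to 100 ms for bit 0 of a (made-up) status register,
 * sleeping roughly 1 ms between reads. */
static int example_wait_until_ready(struct regmap *map)
{
	unsigned int val;

	return regmap_read_poll_timeout(map, 0x04 /* FOO_STATUS */, val,
					val & BIT(0), 1000, 100 * 1000);
}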
@@ -936,6 +936,7 @@ struct sk_buff_fclones {

/**
 * skb_fclone_busy - check if fclone is busy
 * @sk: socket
 * @skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.