Merge tag 'v4.9-rc5' into patchwork

Linux 4.9-rc5

* tag 'v4.9-rc5': (1102 commits)
  Linux 4.9-rc5
  gp8psk: Fix DVB frontend attach
  gp8psk: fix gp8psk_usb_in_op() logic
  dvb-usb: move data_mutex to struct dvb_usb_device
  iio: maxim_thermocouple: detect invalid storage size in read()
  aoe: fix crash in page count manipulation
  lightnvm: invalid offset calculation for lba_shift
  Kbuild: enable -Wmaybe-uninitialized warnings by default
  pcmcia: fix return value of soc_pcmcia_regulator_set
  infiniband: shut up a maybe-uninitialized warning
  crypto: aesni: shut up -Wmaybe-uninitialized warning
  rc: print correct variable for z8f0811
  dib0700: fix nec repeat handling
  s390: pci: don't print uninitialized data for debugging
  nios2: fix timer initcall return value
  x86: apm: avoid uninitialized data
  NFSv4.1: work around -Wmaybe-uninitialized warning
  Kbuild: enable -Wmaybe-uninitialized warning for "make W=1"
  lib/stackdepot: export save/fetch stack for drivers
  mm: kmemleak: scan .data.ro_after_init
  ...
Mauro Carvalho Chehab committed 2016-11-16 16:42:27 -02:00
1181 changed files with 12279 additions and 7528 deletions


@@ -326,6 +326,7 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
void acpi_pci_irq_disable (struct pci_dev *dev);
extern int ec_read(u8 addr, u8 *val);
@@ -554,7 +555,8 @@ int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);
void acpi_walk_dep_device_list(acpi_handle handle);
-struct platform_device *acpi_create_platform_device(struct acpi_device *);
+struct platform_device *acpi_create_platform_device(struct acpi_device *,
+struct property_entry *);
#define ACPI_PTR(_ptr) (_ptr)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)


@@ -258,6 +258,8 @@ struct ceph_watch_item {
struct ceph_entity_addr addr;
};
#define CEPH_LINGER_ID_START 0xffff000000000000ULL
struct ceph_osd_client {
struct ceph_client *client;


@@ -785,7 +785,7 @@ extern struct of_device_id __clk_of_table;
* routines, one at of_clk_init(), and one at platform device probe
*/
#define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
-static void name##_of_clk_init_driver(struct device_node *np) \
+static void __init name##_of_clk_init_driver(struct device_node *np) \
{ \
of_node_clear_flag(np, OF_POPULATED); \
fn(np); \


@@ -173,12 +173,6 @@ static inline void console_sysfs_notify(void)
#endif
extern bool console_suspend_enabled;
-#ifdef CONFIG_OF
-extern void console_set_by_of(void);
-#else
-static inline void console_set_by_of(void) {}
-#endif
/* Suspend and resume console messages over PM events */
extern void suspend_console(void);
extern void resume_console(void);


@@ -677,10 +677,10 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
if (best == table - 1)
return pos - table;
-return best - pos;
+return best - table;
}
-return best - pos;
+return best - table;
}
/* Works only on sorted freq-tables */


@@ -81,6 +81,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
CPUHP_AP_METAG_TIMER_STARTING,


@@ -106,8 +106,9 @@ static inline void frontswap_invalidate_area(unsigned type)
static inline void frontswap_init(unsigned type, unsigned long *map)
{
-if (frontswap_enabled())
-__frontswap_init(type, map);
+#ifdef CONFIG_FRONTSWAP
+__frontswap_init(type, map);
+#endif
}
#endif /* _LINUX_FRONTSWAP_H */


@@ -321,6 +321,7 @@ struct writeback_control;
#define IOCB_HIPRI (1 << 3)
#define IOCB_DSYNC (1 << 4)
#define IOCB_SYNC (1 << 5)
#define IOCB_WRITE (1 << 6)
struct kiocb {
struct file *ki_filp;
@@ -1709,7 +1710,6 @@ struct file_operations {
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
-int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);


@@ -1169,13 +1169,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
-static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
-{
-const struct kobject *kobj = &device_obj->device.kobj;
-return kobj->name;
-}
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,


@@ -141,4 +141,26 @@ enum {
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
void memunmap(void *addr);
/*
* On x86 PAT systems we have memory tracking that keeps track of
* the allowed mappings on memory ranges. This tracking works for
* all the in-kernel mapping APIs (ioremap*), but where the user
* wishes to map a range from a physical device into user memory
* the tracking won't be updated. This API is to be used by
* drivers which remap physical device pages into userspace,
* and wants to make sure they are mapped WC and not UC.
*/
#ifndef arch_io_reserve_memtype_wc
static inline int arch_io_reserve_memtype_wc(resource_size_t base,
resource_size_t size)
{
return 0;
}
static inline void arch_io_free_memtype_wc(resource_size_t base,
resource_size_t size)
{
}
#endif
#endif /* _LINUX_IO_H */
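
The comment above describes pairing the new hooks with ioremap_wc(); a minimal, hypothetical driver sketch of that pairing (the function names and aperture parameters are invented, not part of this diff):

#include <linux/errno.h>
#include <linux/io.h>

static void __iomem *example_aperture;

static int example_map_wc(resource_size_t base, resource_size_t size)
{
	int ret;

	/* tell the PAT tracking (on arches that implement the hook) that
	 * this range will be mapped write-combining */
	ret = arch_io_reserve_memtype_wc(base, size);
	if (ret)
		return ret;

	example_aperture = ioremap_wc(base, size);
	if (!example_aperture) {
		arch_io_free_memtype_wc(base, size);
		return -ENOMEM;
	}
	return 0;
}

static void example_unmap_wc(resource_size_t base, resource_size_t size)
{
	iounmap(example_aperture);
	arch_io_free_memtype_wc(base, size);
}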


@@ -19,11 +19,15 @@ struct vm_fault;
#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
/*
-* Flags for iomap mappings:
+* Flags for all iomap mappings:
*/
-#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */
-#define IOMAP_F_SHARED 0x02 /* block shared with another file */
-#define IOMAP_F_NEW 0x04 /* blocks have been newly allocated */
+#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
+/*
+* Flags that only need to be reported for IOMAP_REPORT requests:
+*/
+#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */
+#define IOMAP_F_SHARED 0x20 /* block shared with another file */
/*
* Magic value for blkno:
@@ -42,8 +46,9 @@ struct iomap {
/*
* Flags for iomap_begin / iomap_end. No flag implies a read.
*/
-#define IOMAP_WRITE (1 << 0)
-#define IOMAP_ZERO (1 << 1)
+#define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */
+#define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */
+#define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */
struct iomap_ops {
/*


@@ -123,12 +123,12 @@ struct inet6_skb_parm {
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
{
return flags & IP6SKB_L3SLAVE;
}
#else
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
{
return false;
}
@@ -139,11 +139,22 @@ static inline bool skb_l3mdev_slave(__u16 flags)
static inline int inet6_iif(const struct sk_buff *skb)
{
-bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
+bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags);
return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
}
+/* can not be used in TCP layer after tcp_v6_fill_cb */
+static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
+ipv6_l3mdev_skb(IP6CB(skb)->flags))
+return true;
+#endif
+return false;
+}
struct tcp6_request_sock {
struct tcp_request_sock tcp6rsk_tcp;
};


@@ -290,7 +290,7 @@
#define GITS_BASER_TYPE_SHIFT (56)
#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
-#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
+#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)


@@ -44,6 +44,7 @@ static inline void kasan_disable_current(void)
void kasan_unpoison_shadow(const void *address, size_t size);
void kasan_unpoison_task_stack(struct task_struct *task);
void kasan_unpoison_stack_above_sp_to(const void *watermark);
void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);
@@ -85,6 +86,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache);
static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}


@@ -31,7 +31,6 @@
* When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
* the last step cherry picks the 2nd arg, we get a zero.
*/
-#define config_enabled(cfg) ___is_defined(cfg)
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
@@ -41,13 +40,13 @@
* otherwise. For boolean options, this is equivalent to
* IS_ENABLED(CONFIG_FOO).
*/
-#define IS_BUILTIN(option) config_enabled(option)
+#define IS_BUILTIN(option) __is_defined(option)
/*
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
* otherwise.
*/
-#define IS_MODULE(option) config_enabled(option##_MODULE)
+#define IS_MODULE(option) __is_defined(option##_MODULE)
/*
* IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled

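As the comments above note, IS_BUILTIN() and IS_MODULE() now expand through __is_defined(); an illustrative, hypothetical use for a tristate option, where CONFIG_FOO is a placeholder:

#include <linux/kconfig.h>
#include <linux/types.h>

static bool example_foo_configured(void)
{
	/* CONFIG_FOO=y -> IS_BUILTIN() is 1; CONFIG_FOO=m -> IS_MODULE() is 1;
	 * unset -> both are 0.  IS_ENABLED(CONFIG_FOO) would cover both. */
	return IS_BUILTIN(CONFIG_FOO) || IS_MODULE(CONFIG_FOO);
}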

@@ -1399,7 +1399,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
-int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
+int mlx4_test_async(struct mlx4_dev *dev);
int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
const u32 offset[], u32 value[],
size_t array_len, u8 port);


@@ -418,8 +418,12 @@ struct mlx5_core_health {
u32 prev;
int miss_counter;
bool sick;
/* wq spinlock to synchronize draining */
spinlock_t wq_lock;
struct workqueue_struct *wq;
unsigned long flags;
struct work_struct work;
struct delayed_work recover_work;
};
struct mlx5_cq_table {
@@ -625,10 +629,6 @@ struct mlx5_db {
int index;
};
-enum {
-MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
-};
enum {
MLX5_COMP_EQ_SIZE = 1024,
};
@@ -638,13 +638,6 @@ enum {
MLX5_PTYS_EN = 1 << 2,
};
-struct mlx5_db_pgdir {
-struct list_head list;
-DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
-__be32 *db_page;
-dma_addr_t db_dma;
-};
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
@@ -789,6 +782,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);


@@ -1266,29 +1266,25 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-void *buf, int len, int write);
+void *buf, int len, unsigned int gup_flags);
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int foll_flags, struct page **pages,
struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
-int write, int force, struct page **pages,
+unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long get_user_pages(unsigned long start, unsigned long nr_pages,
-int write, int force, struct page **pages,
+unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-int write, int force, struct page **pages, int *locked);
+unsigned int gup_flags, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
-int write, int force, struct page **pages,
-unsigned int gup_flags);
+struct page **pages, unsigned int gup_flags);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-int write, int force, struct page **pages);
+struct page **pages, unsigned int gup_flags);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
@@ -1306,7 +1302,7 @@ struct frame_vector {
struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
-bool write, bool force, struct frame_vector *vec);
+unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);
@@ -1391,7 +1387,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
!vma_growsup(vma->vm_next, addr);
}
-int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
+int vma_is_stack_for_current(struct vm_area_struct *vma);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
@@ -2232,6 +2228,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
#define FOLL_MLOCK 0x1000 /* lock present pages */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
#define FOLL_COW 0x4000 /* internal GUP flag */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
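
The get_user_pages*() and frame-vector prototypes above replace the old write/force integer arguments with a single gup_flags word; a hedged caller-side sketch of the conversion (the helper name and usage are invented for illustration):

#include <linux/mm.h>

static long example_pin_one_page(unsigned long addr, struct page **page)
{
	/* caller must hold current->mm->mmap_sem; the old call was
	 * get_user_pages(addr, 1, 1, 0, page, NULL), where the third and
	 * fourth arguments were the write/force booleans */
	return get_user_pages(addr, 1, FOLL_WRITE, page, NULL);
}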


@@ -440,33 +440,7 @@ struct zone {
seqlock_t span_seqlock;
#endif
-/*
-* wait_table -- the array holding the hash table
-* wait_table_hash_nr_entries -- the size of the hash table array
-* wait_table_bits -- wait_table_size == (1 << wait_table_bits)
-*
-* The purpose of all these is to keep track of the people
-* waiting for a page to become available and make them
-* runnable again when possible. The trouble is that this
-* consumes a lot of space, especially when so few things
-* wait on pages at a given time. So instead of using
-* per-page waitqueues, we use a waitqueue hash table.
-*
-* The bucket discipline is to sleep on the same queue when
-* colliding and wake all in that wait queue when removing.
-* When something wakes, it must check to be sure its page is
-* truly available, a la thundering herd. The cost of a
-* collision is great, but given the expected load of the
-* table, they should be so rare as to be outweighed by the
-* benefits from the saved space.
-*
-* __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
-* primary users of these fields, and in mm/page_alloc.c
-* free_area_init_core() performs the initialization of them.
-*/
-wait_queue_head_t *wait_table;
-unsigned long wait_table_hash_nr_entries;
-unsigned long wait_table_bits;
+int initialized;
/* Write-intensive fields used from the page allocator */
ZONE_PADDING(_pad1_)
@@ -546,7 +520,7 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
static inline bool zone_is_initialized(struct zone *zone)
{
-return !!zone->wait_table;
+return zone->initialized;
}
static inline bool zone_is_empty(struct zone *zone)


@@ -1184,7 +1184,7 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int page);
/* Reset and initialize a NAND device */
-int nand_reset(struct nand_chip *chip);
+int nand_reset(struct nand_chip *chip, int chipnr);
/* Free resources held by the NAND device */
void nand_cleanup(struct nand_chip *chip);


@@ -2169,7 +2169,10 @@ struct napi_gro_cb {
/* Used to determine if flush_id can be ignored */
u8 is_atomic:1;
-/* 5 bit hole */
+/* Number of gro_receive callbacks this packet already went through */
+u8 recursion_counter:4;
+/* 1 bit hole */
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -2180,6 +2183,40 @@ struct napi_gro_cb {
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}
typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
struct sk_buff **head,
struct sk_buff *skb)
{
if (unlikely(gro_recursion_inc_test(skb))) {
NAPI_GRO_CB(skb)->flush |= 1;
return NULL;
}
return cb(head, skb);
}
typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
struct sk_buff *);
static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
struct sock *sk,
struct sk_buff **head,
struct sk_buff *skb)
{
if (unlikely(gro_recursion_inc_test(skb))) {
NAPI_GRO_CB(skb)->flush |= 1;
return NULL;
}
return cb(sk, head, skb);
}
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
struct net_device *dev; /* NULL is wildcarded here */
@@ -3877,7 +3914,7 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
ldev = netdev_all_lower_get_next(dev, &(iter)))
#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
-for (iter = (dev)->all_adj_list.lower.next, \
+for (iter = &(dev)->all_adj_list.lower, \
ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
ldev; \
ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
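
call_gro_receive() above is how encapsulation paths bound GRO nesting; a hypothetical tunnel-side sketch (the handler names are invented, not taken from this diff):

#include <linux/netdevice.h>

static struct sk_buff **example_inner_gro_receive(struct sk_buff **head,
						  struct sk_buff *skb)
{
	/* stand-in for the inner protocol's real gro_receive handler */
	return NULL;
}

static struct sk_buff **example_tunnel_gro_receive(struct sk_buff **head,
						   struct sk_buff *skb)
{
	/* ... match and strip the encapsulation header here ... */

	/* bumps recursion_counter; once GRO_RECURSION_LIMIT is reached it
	 * marks the skb for flushing and returns NULL instead of recursing */
	return call_gro_receive(example_inner_gro_receive, head, skb);
}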


@@ -16,7 +16,6 @@
#define _LINUX_NVME_H
#include <linux/types.h>
-#include <linux/uuid.h>
/* NQN names in commands fields specified one size */
#define NVMF_NQN_FIELD_LEN 256
@@ -182,7 +181,7 @@ struct nvme_id_ctrl {
char fr[8];
__u8 rab;
__u8 ieee[3];
-__u8 mic;
+__u8 cmic;
__u8 mdts;
__le16 cntlid;
__le32 ver;
@@ -202,7 +201,13 @@ struct nvme_id_ctrl {
__u8 apsta;
__le16 wctemp;
__le16 cctemp;
-__u8 rsvd270[50];
+__le16 mtfa;
+__le32 hmpre;
+__le32 hmmin;
+__u8 tnvmcap[16];
+__u8 unvmcap[16];
+__le32 rpmbs;
+__u8 rsvd316[4];
__le16 kas;
__u8 rsvd322[190];
__u8 sqes;
@@ -267,7 +272,7 @@ struct nvme_id_ns {
__le16 nabo;
__le16 nabspf;
__u16 rsvd46;
-__le64 nvmcap[2];
+__u8 nvmcap[16];
__u8 rsvd64[40];
__u8 nguid[16];
__u8 eui64[8];
@@ -276,6 +281,16 @@ struct nvme_id_ns {
__u8 vs[3712];
};
enum {
NVME_ID_CNS_NS = 0x00,
NVME_ID_CNS_CTRL = 0x01,
NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
NVME_ID_CNS_NS_PRESENT = 0x11,
NVME_ID_CNS_CTRL_NS_LIST = 0x12,
NVME_ID_CNS_CTRL_LIST = 0x13,
};
enum {
NVME_NS_FEAT_THIN = 1 << 0,
NVME_NS_FLBAS_LBA_MASK = 0xf,
@@ -556,8 +571,10 @@ enum nvme_admin_opcode {
nvme_admin_set_features = 0x09,
nvme_admin_get_features = 0x0a,
nvme_admin_async_event = 0x0c,
nvme_admin_ns_mgmt = 0x0d,
nvme_admin_activate_fw = 0x10,
nvme_admin_download_fw = 0x11,
nvme_admin_ns_attach = 0x15,
nvme_admin_keep_alive = 0x18,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
@@ -583,6 +600,7 @@ enum {
NVME_FEAT_WRITE_ATOMIC = 0x0a,
NVME_FEAT_ASYNC_EVENT = 0x0b,
NVME_FEAT_AUTO_PST = 0x0c,
NVME_FEAT_HOST_MEM_BUF = 0x0d,
NVME_FEAT_KATO = 0x0f,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
@@ -745,7 +763,7 @@ struct nvmf_common_command {
struct nvmf_disc_rsp_page_entry {
__u8 trtype;
__u8 adrfam;
-__u8 nqntype;
+__u8 subtype;
__u8 treq;
__le16 portid;
__le16 cntlid;
@@ -794,7 +812,7 @@ struct nvmf_connect_command {
};
struct nvmf_connect_data {
-uuid_be hostid;
+__u8 hostid[16];
__le16 cntlid;
char resv4[238];
char subsysnqn[NVMF_NQN_FIELD_LEN];
@@ -905,12 +923,23 @@ enum {
NVME_SC_INVALID_VECTOR = 0x108,
NVME_SC_INVALID_LOG_PAGE = 0x109,
NVME_SC_INVALID_FORMAT = 0x10a,
-NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b,
+NVME_SC_FW_NEEDS_CONV_RESET = 0x10b,
NVME_SC_INVALID_QUEUE = 0x10c,
NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
-NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110,
+NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110,
+NVME_SC_FW_NEEDS_RESET = 0x111,
+NVME_SC_FW_NEEDS_MAX_TIME = 0x112,
+NVME_SC_FW_ACIVATE_PROHIBITED = 0x113,
+NVME_SC_OVERLAPPING_RANGE = 0x114,
+NVME_SC_NS_INSUFFICENT_CAP = 0x115,
+NVME_SC_NS_ID_UNAVAILABLE = 0x116,
+NVME_SC_NS_ALREADY_ATTACHED = 0x118,
+NVME_SC_NS_IS_PRIVATE = 0x119,
+NVME_SC_NS_NOT_ATTACHED = 0x11a,
+NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
+NVME_SC_CTRL_LIST_INVALID = 0x11c,
/*
* I/O Command Set Specific - NVM commands:
@@ -941,6 +970,7 @@ enum {
NVME_SC_REFTAG_CHECK = 0x284,
NVME_SC_COMPARE_FAILED = 0x285,
NVME_SC_ACCESS_DENIED = 0x286,
NVME_SC_UNWRITTEN_BLOCK = 0x287,
NVME_SC_DNR = 0x4000,
};
@@ -960,6 +990,7 @@ struct nvme_completion {
__le16 status; /* did the command fail, and if so, why? */
};
-#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+#define NVME_VS(major, minor, tertiary) \
+(((major) << 16) | ((minor) << 8) | (tertiary))
#endif /* _LINUX_NVME_H */


@@ -1257,6 +1257,7 @@ extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *


@@ -253,6 +253,13 @@ static inline int phy_set_mode(struct phy *phy, enum phy_mode mode)
return -ENOSYS;
}
static inline int phy_reset(struct phy *phy)
{
if (!phy)
return 0;
return -ENOSYS;
}
static inline int phy_get_bus_width(struct phy *phy)
{
return -ENOSYS;


@@ -146,6 +146,7 @@ enum qed_led_mode {
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
#define QED_COALESCE_MAX 0xFF
#define QED_DEFAULT_RX_USECS 12
/* forward */
struct qed_dev;


@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);
bool qede_roce_supported(struct qede_dev *dev);
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#if IS_ENABLED(CONFIG_QED_RDMA)
int qede_roce_dev_add(struct qede_dev *dev);
void qede_roce_dev_event_open(struct qede_dev *dev);
void qede_roce_dev_event_close(struct qede_dev *dev);


@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/lockdep.h>
@@ -116,22 +117,22 @@ struct reg_sequence {
#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
({ \
ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
-int ret; \
+int pollret; \
might_sleep_if(sleep_us); \
for (;;) { \
-ret = regmap_read((map), (addr), &(val)); \
-if (ret) \
+pollret = regmap_read((map), (addr), &(val)); \
+if (pollret) \
break; \
if (cond) \
break; \
if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
-ret = regmap_read((map), (addr), &(val)); \
+pollret = regmap_read((map), (addr), &(val)); \
break; \
} \
if (sleep_us) \
usleep_range((sleep_us >> 2) + 1, sleep_us); \
} \
-ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
})
#ifdef CONFIG_REGMAP

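regmap_read_poll_timeout() above evaluates to 0 once cond becomes true and to -ETIMEDOUT (or the read error) otherwise; a hypothetical caller, with the register and bit names invented for illustration:

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/regmap.h>

#define EXAMPLE_STATUS_REG	0x04
#define EXAMPLE_READY_BIT	BIT(0)

static int example_wait_ready(struct device *dev, struct regmap *map)
{
	unsigned int val;
	int err;

	/* poll roughly every 10us, give up after 10ms */
	err = regmap_read_poll_timeout(map, EXAMPLE_STATUS_REG, val,
				       val & EXAMPLE_READY_BIT, 10, 10000);
	if (err)
		dev_err(dev, "device never reported ready: %d\n", err);
	return err;
}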

@@ -936,6 +936,7 @@ struct sk_buff_fclones {
/**
* skb_fclone_busy - check if fclone is busy
* @sk: socket
* @skb: buffer
*
* Returns true if skb is a fast clone, and its clone is not freed.


@@ -902,8 +902,5 @@ asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len,
unsigned long prot, int pkey);
asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val);
asmlinkage long sys_pkey_free(int pkey);
-//asmlinkage long sys_pkey_get(int pkey, unsigned long flags);
-//asmlinkage long sys_pkey_set(int pkey, unsigned long access_rights,
-// unsigned long flags);
#endif


@@ -13,17 +13,6 @@
struct timespec;
struct compat_timespec;
-#ifdef CONFIG_THREAD_INFO_IN_TASK
-struct thread_info {
-unsigned long flags; /* low level flags */
-};
-#define INIT_THREAD_INFO(tsk) \
-{ \
-.flags = 0, \
-}
-#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
#define current_thread_info() ((struct thread_info *)current)
#endif