Merge remote-tracking branch 'net-next/master' into mac80211-next
Merge net-next to resolve a conflict and to get the mac80211 rhashtable
fixes so further patches can be applied on top.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
@@ -332,6 +332,8 @@ extern int bcma_arch_register_fallback_sprom(
		struct ssb_sprom *out));

struct bcma_bus {
	struct device *dev;

	/* The MMIO area. */
	void __iomem *mmio;

@@ -339,14 +341,7 @@ struct bcma_bus {

	enum bcma_hosttype hosttype;
	bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */
	union {
		/* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */
		struct pci_dev *host_pci;
		/* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */
		struct sdio_func *host_sdio;
		/* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */
		struct platform_device *host_pdev;
	};
	struct pci_dev *host_pci; /* PCI bus pointer (BCMA_HOSTTYPE_PCI only) */

	struct bcma_chipinfo chipinfo;
@@ -116,7 +116,13 @@ extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);

static inline sector_t blk_rq_trace_sector(struct request *rq)
{
	return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq);
	/*
	 * Tracing should ignore starting sector for passthrough requests and
	 * requests where starting sector didn't get set.
	 */
	if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
		return 0;
	return blk_rq_pos(rq);
}

static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
@@ -194,6 +194,7 @@ enum bpf_arg_type {
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
};

/* type of values returned from helper functions */
@@ -203,6 +204,7 @@ enum bpf_return_type {
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
@@ -256,6 +258,10 @@ enum bpf_reg_type {
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
};

/* The information passed from prog-specific *_is_valid_access
@@ -767,8 +773,9 @@ int bpf_map_offload_get_next_key(struct bpf_map *map,
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops);
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
@@ -920,6 +927,9 @@ void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
@@ -928,6 +938,12 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
@@ -944,4 +960,31 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
}
#endif

#ifdef CONFIG_INET
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

#endif /* _LINUX_BPF_H */
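As a hedged sketch of the bpf_offload_dev change above: drivers can now stash a private pointer at creation time and recover it from offload callbacks. The names my_dev and my_bpf_ops below are illustrative, not from this diff.

/* Sketch only: "my_dev" and "my_bpf_ops" are hypothetical driver objects. */
static int my_dev_bpf_init(struct my_dev *mdev)
{
	struct bpf_offload_dev *offdev;

	offdev = bpf_offload_dev_create(&my_bpf_ops, mdev);
	if (IS_ERR(offdev))
		return PTR_ERR(offdev);
	mdev->bpf_offdev = offdev;
	return 0;
}

/* An offload callback can then recover the driver context with:
 *	struct my_dev *mdev = bpf_offload_dev_priv(offdev);
 */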
@@ -34,6 +34,7 @@
#ifndef __has_attribute
# define __has_attribute(x) __GCC4_has_attribute_##x
# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9)
# define __GCC4_has_attribute___copy__ 0
# define __GCC4_has_attribute___designated_init__ 0
# define __GCC4_has_attribute___externally_visible__ 1
# define __GCC4_has_attribute___noclone__ 1
@@ -100,6 +101,19 @@
 */
#define __attribute_const__             __attribute__((__const__))

/*
 * Optional: only supported since gcc >= 9
 * Optional: not supported by clang
 * Optional: not supported by icc
 *
 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute
 */
#if __has_attribute(__copy__)
# define __copy(symbol)                 __attribute__((__copy__(symbol)))
#else
# define __copy(symbol)
#endif

/*
 * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
 * attribute warnings entirely and for good") for more information.
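A hedged illustration of what the new __copy() attribute is for: it copies the attributes of one declaration (e.g. an __init function) onto an alias, which keeps gcc 9's -Wmissing-attributes quiet. The module.h hunk later in this diff applies exactly this pattern; the names here are hypothetical.

/* Sketch only: my_init is a hypothetical module init function. */
static int __init my_init(void) { return 0; }
/* The alias inherits my_init's attributes via __copy(): */
int init_module(void) __copy(my_init) __attribute__((alias("my_init")));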
@@ -180,12 +180,10 @@ enum cpuhp_smt_control {
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
extern enum cpuhp_smt_control cpu_smt_control;
extern void cpu_smt_disable(bool force);
extern void cpu_smt_check_topology_early(void);
extern void cpu_smt_check_topology(void);
#else
# define cpu_smt_control	(CPU_SMT_ENABLED)
static inline void cpu_smt_disable(bool force) { }
static inline void cpu_smt_check_topology_early(void) { }
static inline void cpu_smt_check_topology(void) { }
#endif
@@ -62,9 +62,10 @@ extern const struct qstr slash_name;
struct dentry_stat_t {
	long nr_dentry;
	long nr_unused;
	long age_limit;		/* age in seconds */
	long want_pages;	/* pages requested by system */
	long dummy[2];
	long age_limit;		/* age in seconds */
	long want_pages;	/* pages requested by system */
	long nr_negative;	/* # of unused negative dentries */
	long dummy;		/* Reserved for future use */
};
extern struct dentry_stat_t dentry_stat;
@@ -1198,8 +1198,6 @@ static inline bool efi_enabled(int feature)
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);

extern bool efi_is_table_address(unsigned long phys_addr);

extern int efi_apply_persistent_mem_reservations(void);
#else
static inline bool efi_enabled(int feature)
{
@@ -1218,11 +1216,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
{
	return false;
}

static inline int efi_apply_persistent_mem_reservations(void)
{
	return 0;
}
#endif

extern int efi_status_to_err(efi_status_t status);
@@ -400,4 +400,19 @@ struct ethtool_ops {
	void	(*get_ethtool_phy_stats)(struct net_device *,
					 struct ethtool_stats *, u64 *);
};

struct ethtool_rx_flow_rule {
	struct flow_rule	*rule;
	unsigned long		priv[0];
};

struct ethtool_rx_flow_spec_input {
	const struct ethtool_rx_flow_spec	*fs;
	u32					rss_ctx;
};

struct ethtool_rx_flow_rule *
ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input);
void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule);

#endif /* _LINUX_ETHTOOL_H */
@@ -611,8 +611,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
	return qdisc_skb_cb(skb)->data;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
					 struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
@@ -631,15 +631,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
	return res;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u32 res;

	preempt_disable();
	res = __bpf_prog_run_save_cb(prog, skb);
	preempt_enable();
	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u32 res;

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	return BPF_PROG_RUN(prog, skb);
	preempt_disable();
	res = BPF_PROG_RUN(prog, skb);
	preempt_enable();
	return res;
}

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
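The split above exists so callers that already run with preemption disabled can use __bpf_prog_run_save_cb() directly, while bpf_prog_run_save_cb() pins the CPU itself so per-CPU BPF state stays consistent. A minimal caller-side sketch, with an illustrative helper name:

/* Sketch only: run a filter over an skb from preemptible context. */
static u32 my_run_filter(const struct bpf_prog *prog, struct sk_buff *skb)
{
	/* The wrapper brackets the run in preempt_disable()/preempt_enable(). */
	return bpf_prog_run_save_cb(prog, skb);
}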
@@ -1479,11 +1479,12 @@ struct super_block {
	struct user_namespace *s_user_ns;

	/*
	 * Keep the lru lists last in the structure so they always sit on their
	 * own individual cachelines.
	 * The list_lru structure is essentially just a pointer to a table
	 * of per-node lru lists, each of which has its own spinlock.
	 * There is no need to put them into separate cachelines.
	 */
	struct list_lru		s_dentry_lru ____cacheline_aligned_in_smp;
	struct list_lru		s_inode_lru ____cacheline_aligned_in_smp;
	struct list_lru		s_dentry_lru;
	struct list_lru		s_inode_lru;
	struct rcu_head		rcu;
	struct work_struct	destroy_work;
@@ -7,6 +7,7 @@
#define __PTP_QORIQ_H__

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/ptp_clock_kernel.h>

/*
@@ -49,7 +50,7 @@ struct etts_regs {
	u32 tmr_etts2_l;  /* Timestamp of general purpose external trigger */
};

struct qoriq_ptp_registers {
struct ptp_qoriq_registers {
	struct ctrl_regs __iomem *ctrl_regs;
	struct alarm_regs __iomem *alarm_regs;
	struct fiper_regs __iomem *fiper_regs;
@@ -57,15 +58,15 @@ struct qoriq_ptp_registers {
};

/* Offset definitions for the four register groups */
#define CTRL_REGS_OFFSET	0x0
#define ALARM_REGS_OFFSET	0x40
#define FIPER_REGS_OFFSET	0x80
#define ETTS_REGS_OFFSET	0xa0
#define ETSEC_CTRL_REGS_OFFSET	0x0
#define ETSEC_ALARM_REGS_OFFSET	0x40
#define ETSEC_FIPER_REGS_OFFSET	0x80
#define ETSEC_ETTS_REGS_OFFSET	0xa0

#define FMAN_CTRL_REGS_OFFSET	0x80
#define FMAN_ALARM_REGS_OFFSET	0xb8
#define FMAN_FIPER_REGS_OFFSET	0xd0
#define FMAN_ETTS_REGS_OFFSET	0xe0
#define CTRL_REGS_OFFSET	0x80
#define ALARM_REGS_OFFSET	0xb8
#define FIPER_REGS_OFFSET	0xd0
#define ETTS_REGS_OFFSET	0xe0

/* Bit definitions for the TMR_CTRL register */
@@ -136,9 +137,9 @@ struct qoriq_ptp_registers {
#define DEFAULT_FIPER1_PERIOD	1000000000
#define DEFAULT_FIPER2_PERIOD	100000

struct qoriq_ptp {
struct ptp_qoriq {
	void __iomem *base;
	struct qoriq_ptp_registers regs;
	struct ptp_qoriq_registers regs;
	spinlock_t lock; /* protects regs */
	struct ptp_clock *clock;
	struct ptp_clock_info caps;
@@ -156,28 +157,48 @@ struct qoriq_ptp {
	u32 cksel;
	u32 tmr_fiper1;
	u32 tmr_fiper2;
	u32 (*read)(unsigned __iomem *addr);
	void (*write)(unsigned __iomem *addr, u32 val);
};

static inline u32 qoriq_read(unsigned __iomem *addr)
static inline u32 qoriq_read_be(unsigned __iomem *addr)
{
	u32 val;

	val = ioread32be(addr);
	return val;
	return ioread32be(addr);
}

static inline void qoriq_write(unsigned __iomem *addr, u32 val)
static inline void qoriq_write_be(unsigned __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}

static inline u32 qoriq_read_le(unsigned __iomem *addr)
{
	return ioread32(addr);
}

static inline void qoriq_write_le(unsigned __iomem *addr, u32 val)
{
	iowrite32(val, addr);
}

irqreturn_t ptp_qoriq_isr(int irq, void *priv);
int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
		   const struct ptp_clock_info *caps);
void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq);
int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm);
int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta);
int ptp_qoriq_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts);
int ptp_qoriq_settime(struct ptp_clock_info *ptp,
		      const struct timespec64 *ts);
int ptp_qoriq_enable(struct ptp_clock_info *ptp,
		     struct ptp_clock_request *rq, int on);
#ifdef CONFIG_DEBUG_FS
void ptp_qoriq_create_debugfs(struct qoriq_ptp *qoriq_ptp);
void ptp_qoriq_remove_debugfs(struct qoriq_ptp *qoriq_ptp);
void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq);
void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq);
#else
static inline void ptp_qoriq_create_debugfs(struct qoriq_ptp *qoriq_ptp)
static inline void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq)
{ }
static inline void ptp_qoriq_remove_debugfs(struct qoriq_ptp *qoriq_ptp)
static inline void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq)
{ }
#endif
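With the rename to ptp_qoriq and the split into _be/_le accessors above, a driver can pick the accessor pair matching the device's register endianness before calling ptp_qoriq_init(). A hedged sketch; the little_endian flag and helper name are illustrative:

/* Sketch only: select register accessors for a ptp_qoriq instance. */
static void my_setup_accessors(struct ptp_qoriq *ptp_qoriq, bool little_endian)
{
	if (little_endian) {
		ptp_qoriq->read = qoriq_read_le;
		ptp_qoriq->write = qoriq_write_le;
	} else {
		ptp_qoriq->read = qoriq_read_be;
		ptp_qoriq->write = qoriq_write_be;
	}
}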
@@ -24,7 +24,10 @@

#ifdef CONFIG_DEBUG_FS

#include <linux/kfifo.h>

#define HID_DEBUG_BUFSIZE 512
#define HID_DEBUG_FIFOSIZE 512

void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
void hid_dump_report(struct hid_device *, int , u8 *, int);
@@ -37,11 +40,8 @@ void hid_debug_init(void);
void hid_debug_exit(void);
void hid_debug_event(struct hid_device *, char *);

struct hid_debug_list {
	char *hid_debug_buf;
	int head;
	int tail;
	DECLARE_KFIFO_PTR(hid_debug_fifo, char);
	struct fasync_struct *fasync;
	struct hid_device *hdev;
	struct list_head node;
@@ -64,4 +64,3 @@ struct hid_debug_list {
#endif

#endif
@@ -615,6 +615,7 @@ struct ide_drive_s {

	/* current sense rq and buffer */
	bool sense_rq_armed;
	bool sense_rq_active;
	struct request *sense_rq;
	struct request_sense sense_data;

@@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
extern void ide_timer_expiry(struct timer_list *t);
extern irqreturn_t ide_intr(int irq, void *dev_id);
extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);

void ide_init_disk(struct gendisk *, ide_drive_t *);
@@ -319,7 +319,7 @@
#define GITS_TYPER_PLPIS		(1UL << 0)
#define GITS_TYPER_VLPIS		(1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT	4
#define GITS_TYPER_ITT_ENTRY_SIZE(r)	((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_ITT_ENTRY_SIZE(r)	((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
#define GITS_TYPER_IDBITS_SHIFT		8
#define GITS_TYPER_DEVBITS_SHIFT	13
#define GITS_TYPER_DEVBITS(r)		((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
@@ -261,6 +261,50 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
	return reg;
}

/**
 * linkmode_adv_to_mii_10gbt_adv_t
 * @advertising: the linkmode advertisement settings
 *
 * A small helper function that translates linkmode advertisement
 * settings to phy autonegotiation advertisements for the C45
 * 10GBASE-T AN CONTROL (7.32) register.
 */
static inline u32 linkmode_adv_to_mii_10gbt_adv_t(unsigned long *advertising)
{
	u32 result = 0;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
			      advertising))
		result |= MDIO_AN_10GBT_CTRL_ADV2_5G;
	if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
			      advertising))
		result |= MDIO_AN_10GBT_CTRL_ADV5G;
	if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
			      advertising))
		result |= MDIO_AN_10GBT_CTRL_ADV10G;

	return result;
}

/**
 * mii_10gbt_stat_mod_linkmode_lpa_t
 * @advertising: target the linkmode advertisement settings
 * @adv: value of the C45 10GBASE-T AN STATUS register
 *
 * A small helper function that translates C45 10GBASE-T AN STATUS register bits
 * to linkmode advertisement settings. Other bits in advertising aren't changed.
 */
static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising,
						     u32 lpa)
{
	linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
			 advertising, lpa & MDIO_AN_10GBT_STAT_LP2_5G);
	linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
			 advertising, lpa & MDIO_AN_10GBT_STAT_LP5G);
	linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
			 advertising, lpa & MDIO_AN_10GBT_STAT_LP10G);
}

int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
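A hedged sketch of how a C45 PHY driver might use the two new helpers during autonegotiation (function names here are illustrative; the MDIO register constants are the standard ones from mdio.h):

/* Sketch: advertise 2.5G/5G/10G from phydev->advertising, then fold the
 * link partner's abilities back into phydev->lp_advertising. */
static int my_c45_config_aneg(struct phy_device *phydev)
{
	u32 adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising);

	return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, adv);
}

static int my_c45_read_lpa(struct phy_device *phydev)
{
	int stat = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);

	if (stat < 0)
		return stat;
	mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, stat);
	return 0;
}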
@@ -29,9 +29,6 @@ extern unsigned long max_pfn;
 */
extern unsigned long long max_possible_pfn;

#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
@@ -21,14 +21,16 @@ struct vmem_altmap;
 * walkers which rely on the fully initialized page->flags and others
 * should use this rather than pfn_valid && pfn_to_page
 */
#define pfn_to_online_page(pfn)				   \
({						   \
	struct page *___page = NULL;		   \
	unsigned long ___nr = pfn_to_section_nr(pfn); \
						   \
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
		___page = pfn_to_page(pfn);	   \
	___page;				   \
#define pfn_to_online_page(pfn)					   \
({							   \
	struct page *___page = NULL;			   \
	unsigned long ___pfn = pfn;			   \
	unsigned long ___nr = pfn_to_section_nr(___pfn); \
						   \
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
	    pfn_valid_within(___pfn))			   \
		___page = pfn_to_page(___pfn);		   \
	___page;					   \
})

/*
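The reworked macro evaluates pfn only once and adds a pfn_valid_within() check for holes inside a section. A hedged usage sketch with an illustrative helper name:

/* Sketch: count online pages in [start_pfn, end_pfn). */
static unsigned long my_count_online(unsigned long start_pfn,
				     unsigned long end_pfn)
{
	unsigned long pfn, nr = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_to_online_page(pfn))
			nr++;
	return nr;
}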
@@ -67,7 +67,7 @@
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))

/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
@@ -342,6 +342,8 @@ enum mlx5_event {
	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,

	MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE = 0xe,

	MLX5_EVENT_TYPE_DCT_DRAINED	   = 0x1c,

	MLX5_EVENT_TYPE_FPGA_ERROR	   = 0x20,
@@ -591,7 +593,7 @@ struct mlx5_eqe_cmd {
};

struct mlx5_eqe_page_req {
	u8		rsvd0[2];
	__be16		ec_function;
	__be16		func_id;
	__be32		num_pages;
	__be32		rsvd1[5];
@@ -1201,6 +1203,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_ODP(mdev, cap)\
	MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)

#define MLX5_CAP_ODP_MAX(mdev, cap)\
	MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap)

#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
	MLX5_GET(vector_calc_cap, \
		 mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
@@ -522,6 +522,7 @@ struct mlx5_priv {
	atomic_t		reg_pages;
	struct list_head	free_list;
	int			vfs_pages;
	int			peer_pf_pages;

	struct mlx5_core_health health;

@@ -652,6 +653,7 @@ struct mlx5_core_dev {
		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
		u8  embedded_cpu;
	} caps;
	u64			sys_image_guid;
	phys_addr_t		iseg_base;
@@ -850,11 +852,30 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

struct mlx5_async_ctx {
	struct mlx5_core_dev *dev;
	atomic_t num_inflight;
	struct wait_queue_head wait;
};

struct mlx5_async_work;

typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);

struct mlx5_async_work {
	struct mlx5_async_ctx *ctx;
	mlx5_async_cbk_t user_callback;
};

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx);
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work);

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context);
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
@@ -885,9 +906,10 @@ void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
			     struct mlx5_core_mkey *mkey,
			     u32 *in, int inlen,
			     u32 *out, int outlen,
			     mlx5_cmd_cbk_t callback, void *context);
			     struct mlx5_async_ctx *async_ctx, u32 *in,
			     int inlen, u32 *out, int outlen,
			     mlx5_async_cbk_t callback,
			     struct mlx5_async_work *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
@@ -897,14 +919,12 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		      u16 opmod, u8 port);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages);
				 s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
@@ -1058,11 +1078,29 @@ static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}

#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
#define MLX5_VPORT_MANAGER(mdev) \
	(MLX5_CAP_GEN(mdev, vport_group_manager) && \
	 (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
	 mlx5_core_is_pf(mdev))
static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu;
}

static inline bool mlx5_core_is_ecpf_esw_manager(struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
}

static inline bool mlx5_ecpf_vport_exists(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
}

#define MLX5_HOST_PF_MAX_VFS	(127u)
static inline u16 mlx5_core_max_vfs(struct mlx5_core_dev *dev)
{
	if (mlx5_core_is_ecpf_esw_manager(dev))
		return MLX5_HOST_PF_MAX_VFS;
	else
		return pci_sriov_get_totalvfs(dev->pdev);
}

static inline int mlx5_get_gid_table_len(u16 param)
{
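The new mlx5_async_ctx API above ties every callback-style command to a context that is drained on teardown. A hedged lifecycle sketch (command buffer layout elided; helper names are illustrative):

/* Sketch: issue one async command, then drain the context before cleanup. */
static void my_done(int status, struct mlx5_async_work *work)
{
	/* Completion runs here; container_of() can recover caller state. */
}

static int my_use_async(struct mlx5_core_dev *dev, void *in, int in_sz,
			void *out, int out_sz)
{
	struct mlx5_async_ctx ctx;
	struct mlx5_async_work work;
	int err;

	mlx5_cmd_init_async_ctx(dev, &ctx);
	err = mlx5_cmd_exec_cb(&ctx, in, in_sz, out, out_sz, my_done, &work);
	mlx5_cmd_cleanup_async_ctx(&ctx);	/* waits for in-flight work */
	return err;
}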
@@ -22,6 +22,12 @@ enum {
	NUM_REP_TYPES,
};

enum {
	REP_UNREGISTERED,
	REP_REGISTERED,
	REP_LOADED,
};

struct mlx5_eswitch_rep;
struct mlx5_eswitch_rep_if {
	int		       (*load)(struct mlx5_core_dev *dev,
@@ -29,7 +35,7 @@ struct mlx5_eswitch_rep_if {
	void		       (*unload)(struct mlx5_eswitch_rep *rep);
	void		       *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
	void			*priv;
	bool		       valid;
	u8		       state;
};

struct mlx5_eswitch_rep {
@@ -40,13 +46,10 @@ struct mlx5_eswitch_rep {
	u32		       vlan_refcount;
};

void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep_if *rep_if,
				     u8 rep_type);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index,
				       u8 rep_type);
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep_if *rep_if,
				      u8 rep_type);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type);
@@ -72,6 +72,7 @@ enum {

enum {
	MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE        = 0x0,
	MLX5_SET_HCA_CAP_OP_MOD_ODP                   = 0x2,
	MLX5_SET_HCA_CAP_OP_MOD_ATOMIC                = 0x3,
};

@@ -141,6 +142,7 @@ enum {
	MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY     = 0x725,
	MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY       = 0x726,
	MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS        = 0x727,
	MLX5_CMD_OP_QUERY_HOST_PARAMS             = 0x740,
	MLX5_CMD_OP_QUERY_VPORT_STATE             = 0x750,
	MLX5_CMD_OP_MODIFY_VPORT_STATE            = 0x751,
	MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT       = 0x752,
@@ -629,7 +631,8 @@ struct mlx5_ifc_e_switch_cap_bits {
	u8      vport_svlan_insert[0x1];
	u8      vport_cvlan_insert_if_not_exist[0x1];
	u8      vport_cvlan_insert_overwrite[0x1];
	u8      reserved_at_5[0x17];
	u8      reserved_at_5[0x16];
	u8      ecpf_vport_exists[0x1];
	u8      counter_eswitch_affinity[0x1];
	u8      merged_eswitch[0x1];
	u8      nic_vport_node_guid_modify[0x1];
@@ -831,7 +834,9 @@ struct mlx5_ifc_odp_cap_bits {

	struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;

	u8         reserved_at_e0[0x720];
	struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps;

	u8         reserved_at_100[0x700];
};

struct mlx5_ifc_calc_op {
@@ -4438,7 +4443,8 @@ struct mlx5_ifc_query_pages_out_bits {

	u8         syndrome[0x20];

	u8         reserved_at_40[0x10];
	u8         embedded_cpu_function[0x1];
	u8         reserved_at_41[0xf];
	u8         function_id[0x10];

	u8         num_pages[0x20];
@@ -4457,7 +4463,8 @@ struct mlx5_ifc_query_pages_in_bits {
	u8         reserved_at_20[0x10];
	u8         op_mod[0x10];

	u8         reserved_at_40[0x10];
	u8         embedded_cpu_function[0x1];
	u8         reserved_at_41[0xf];
	u8         function_id[0x10];

	u8         reserved_at_60[0x20];
@@ -5877,7 +5884,8 @@ struct mlx5_ifc_manage_pages_in_bits {
	u8         reserved_at_20[0x10];
	u8         op_mod[0x10];

	u8         reserved_at_40[0x10];
	u8         embedded_cpu_function[0x1];
	u8         reserved_at_41[0xf];
	u8         function_id[0x10];

	u8         input_num_entries[0x20];
@@ -6055,7 +6063,8 @@ struct mlx5_ifc_enable_hca_in_bits {
	u8         reserved_at_20[0x10];
	u8         op_mod[0x10];

	u8         reserved_at_40[0x10];
	u8         embedded_cpu_function[0x1];
	u8         reserved_at_41[0xf];
	u8         function_id[0x10];

	u8         reserved_at_60[0x20];
@@ -6099,7 +6108,8 @@ struct mlx5_ifc_disable_hca_in_bits {
	u8         reserved_at_20[0x10];
	u8         op_mod[0x10];

	u8         reserved_at_40[0x10];
	u8         embedded_cpu_function[0x1];
	u8         reserved_at_41[0xf];
	u8         function_id[0x10];

	u8         reserved_at_60[0x20];
@@ -7817,21 +7827,23 @@ struct mlx5_ifc_ptys_reg_bits {
	u8         proto_mask[0x3];

	u8         an_status[0x4];
	u8         reserved_at_24[0x3c];
	u8         reserved_at_24[0x1c];

	u8         ext_eth_proto_capability[0x20];

	u8         eth_proto_capability[0x20];

	u8         ib_link_width_capability[0x10];
	u8         ib_proto_capability[0x10];

	u8         reserved_at_a0[0x20];
	u8         ext_eth_proto_admin[0x20];

	u8         eth_proto_admin[0x20];

	u8         ib_link_width_admin[0x10];
	u8         ib_proto_admin[0x10];

	u8         reserved_at_100[0x20];
	u8         ext_eth_proto_oper[0x20];

	u8         eth_proto_oper[0x20];

@@ -8280,7 +8292,9 @@ struct mlx5_ifc_mpegc_reg_bits {
struct mlx5_ifc_pcam_enhanced_features_bits {
	u8         reserved_at_0[0x6d];
	u8         rx_icrc_encapsulated_counter[0x1];
	u8         reserved_at_6e[0x8];
	u8         reserved_at_6e[0x4];
	u8         ptys_extended_ethernet[0x1];
	u8         reserved_at_73[0x3];
	u8         pfcc_mask[0x1];
	u8         reserved_at_77[0x3];
	u8         per_lane_error_counters[0x1];
@@ -8746,7 +8760,8 @@ struct mlx5_ifc_initial_seg_bits {
	u8         initializing[0x1];
	u8         reserved_at_fe1[0x4];
	u8         nic_interface_supported[0x3];
	u8         reserved_at_fe8[0x18];
	u8         embedded_cpu[0x1];
	u8         reserved_at_fe9[0x17];

	struct mlx5_ifc_health_buffer_bits health_buffer;

@@ -9513,4 +9528,44 @@ struct mlx5_ifc_mtrc_ctrl_bits {
	u8         reserved_at_80[0x180];
};

struct mlx5_ifc_host_params_context_bits {
	u8         host_number[0x8];
	u8         reserved_at_8[0x8];
	u8         host_num_of_vfs[0x10];

	u8         reserved_at_20[0x10];
	u8         host_pci_bus[0x10];

	u8         reserved_at_40[0x10];
	u8         host_pci_device[0x10];

	u8         reserved_at_60[0x10];
	u8         host_pci_function[0x10];

	u8         reserved_at_80[0x180];
};

struct mlx5_ifc_query_host_params_in_bits {
	u8         opcode[0x10];
	u8         reserved_at_10[0x10];

	u8         reserved_at_20[0x10];
	u8         op_mod[0x10];

	u8         reserved_at_40[0x40];
};

struct mlx5_ifc_query_host_params_out_bits {
	u8         status[0x8];
	u8         reserved_at_8[0x18];

	u8         syndrome[0x20];

	u8         reserved_at_40[0x40];

	struct mlx5_ifc_host_params_context_bits host_params_context;

	u8         reserved_at_280[0x180];
};

#endif /* MLX5_IFC_H */
@@ -92,6 +92,22 @@ enum mlx5e_link_mode {
	MLX5E_LINK_MODES_NUMBER,
};

enum mlx5e_ext_link_mode {
	MLX5E_SGMII_100M			= 0,
	MLX5E_1000BASE_X_SGMII			= 1,
	MLX5E_5GBASE_R				= 3,
	MLX5E_10GBASE_XFI_XAUI_1		= 4,
	MLX5E_40GBASE_XLAUI_4_XLPPI_4		= 5,
	MLX5E_25GAUI_1_25GBASE_CR_KR		= 6,
	MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2	= 7,
	MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR	= 8,
	MLX5E_CAUI_4_100GBASE_CR4_KR4		= 9,
	MLX5E_100GAUI_2_100GBASE_CR2_KR2	= 10,
	MLX5E_200GAUI_4_200GBASE_CR4_KR4	= 12,
	MLX5E_400GAUI_8				= 15,
	MLX5E_EXT_LINK_MODES_NUMBER,
};

enum mlx5e_connector_type {
	MLX5E_PORT_UNKNOWN	= 0,
	MLX5E_PORT_NONE			= 1,
@@ -106,31 +122,23 @@ enum mlx5e_connector_type {
};

#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
#define MLX5_GET_ETH_PROTO(reg, out, ext, field)	\
	(ext ? MLX5_GET(reg, out, ext_##field) :	\
	MLX5_GET(reg, out, field))

int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
			 int ptys_size, int proto_mask, u8 local_port);
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
			      u32 *proto_cap, int proto_mask);
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
				u32 *proto_admin, int proto_mask);
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
				    u8 *link_width_oper, u8 local_port);
int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
				  u8 *proto_oper, u8 local_port);
int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
				   u32 *proto_oper, u8 local_port);
int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
		       u32 proto_admin, int proto_mask);
void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
			       enum mlx5_port_status status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
				 enum mlx5_port_status *status);
int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
			     u8 *an_status,
			     u8 *an_disable_cap, u8 *an_disable_admin);

int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
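MLX5_GET_ETH_PROTO() lets PTYS readers pick the extended or legacy field with one expression; the ext flag would come from the ptys_extended_ethernet capability bit shown earlier in this diff. A hedged sketch with an illustrative helper name:

/* Sketch: read eth_proto_capability or ext_eth_proto_capability. */
static int my_query_proto_cap(struct mlx5_core_dev *dev, bool ext, u32 *cap)
{
	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
	int err;

	err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, 1);
	if (err)
		return err;
	*cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_capability);
	return 0;
}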
@@ -36,15 +36,38 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

#define MLX5_VPORT_PF_PLACEHOLDER		(1u)
#define MLX5_VPORT_UPLINK_PLACEHOLDER		(1u)
#define MLX5_VPORT_ECPF_PLACEHOLDER(mdev)	(mlx5_ecpf_vport_exists(mdev))

#define MLX5_SPECIAL_VPORTS(mdev) (MLX5_VPORT_PF_PLACEHOLDER +		\
				   MLX5_VPORT_UPLINK_PLACEHOLDER +	\
				   MLX5_VPORT_ECPF_PLACEHOLDER(mdev))

#define MLX5_TOTAL_VPORTS(mdev)	(MLX5_SPECIAL_VPORTS(mdev) +	\
				 mlx5_core_max_vfs(mdev))

#define MLX5_VPORT_MANAGER(mdev)	\
	(MLX5_CAP_GEN(mdev, vport_group_manager) && \
	 (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
	 mlx5_core_is_pf(mdev))

enum {
	MLX5_CAP_INLINE_MODE_L2,
	MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
	MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
};

enum {
	MLX5_VPORT_PF			= 0x0,
	MLX5_VPORT_FIRST_VF		= 0x1,
	MLX5_VPORT_ECPF			= 0xfffe,
	MLX5_VPORT_UPLINK		= 0xffff
};

u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state);
				  u16 vport, u8 other_vport, u8 state);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr);
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
@@ -60,7 +83,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid);
				    u16 vport, u64 node_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
@@ -78,7 +101,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid);
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u32 vport,
				  u16 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size);
@@ -87,7 +110,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   u8 addr_list[][ETH_ALEN],
				   int list_size);
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u32 vport,
				 u16 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all);
@@ -96,7 +119,7 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_mc,
				  int promisc_all);
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u32 vport,
			       u16 vport,
			       u16 vlans[],
			       int *size);
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
@@ -106,7 +129,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
				u64 *rx_discard_vport_down,
				u8 other_vport, u64 *rx_discard_vport_down,
				u64 *tx_discard_vport_down);
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
@@ -95,6 +95,13 @@ struct page {
			 */
			unsigned long private;
		};
		struct {	/* page_pool used by netstack */
			/**
			 * @dma_addr: might require a 64-bit value even on
			 * 32-bit architectures.
			 */
			dma_addr_t dma_addr;
		};
		struct {	/* slab, slob and slub */
			union {
				struct list_head slab_list;	/* uses lru */
@@ -308,6 +308,7 @@ struct mmc_card {
	unsigned int    nr_parts;

	unsigned int		bouncesz;	/* Bounce buffer size */
	struct workqueue_struct *complete_wq;	/* Private workqueue */
};

static inline bool mmc_large_sector(struct mmc_card *card)
@@ -129,13 +129,13 @@ extern void cleanup_module(void);
#define module_init(initfn)					\
	static inline initcall_t __maybe_unused __inittest(void)	\
	{ return initfn; }					\
	int init_module(void) __attribute__((alias(#initfn)));
	int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));

/* This is only required if you want to be unloadable. */
#define module_exit(exitfn)					\
	static inline exitcall_t __maybe_unused __exittest(void)	\
	{ return exitfn; }					\
	void cleanup_module(void) __attribute__((alias(#exitfn)));
	void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));

#endif
@@ -11,6 +11,8 @@
#define _LINUX_NETDEV_FEATURES_H

#include <linux/types.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

typedef u64 netdev_features_t;

@@ -154,8 +156,26 @@ enum {
#define NETIF_F_HW_TLS_TX	__NETIF_F(HW_TLS_TX)
#define NETIF_F_HW_TLS_RX	__NETIF_F(HW_TLS_RX)

#define for_each_netdev_feature(mask_addr, bit)	\
	for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
/* Finds the next feature with the highest number of the range of start till 0.
 */
static inline int find_next_netdev_feature(u64 feature, unsigned long start)
{
	/* like BITMAP_LAST_WORD_MASK() for u64
	 * this sets the most significant 64 - start to 0.
	 */
	feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));

	return fls64(feature) - 1;
}

/* This goes for the MSB to the LSB through the set feature bits,
 * mask_addr should be a u64 and bit an int
 */
#define for_each_netdev_feature(mask_addr, bit)				\
	for ((bit) = find_next_netdev_feature((mask_addr),		\
					      NETDEV_FEATURE_COUNT);	\
	     (bit) >= 0;						\
	     (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))

/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
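The rewritten for_each_netdev_feature() above walks set bits from MSB to LSB via find_next_netdev_feature(), and now takes the mask by value rather than by address. A hedged usage sketch with an illustrative helper name:

/* Sketch: log every feature bit set in a mask, highest bit first. */
static void my_dump_features(netdev_features_t features)
{
	int bit;

	for_each_netdev_feature(features, bit)
		pr_info("feature bit %d is set\n", bit);
}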
@@ -868,7 +868,6 @@ enum bpf_netdev_command {
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_QUERY_XSK_UMEM,
	XDP_SETUP_XSK_UMEM,
};

@@ -895,10 +894,10 @@ struct netdev_bpf {
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
		/* XDP_SETUP_XSK_UMEM */
		struct {
			struct xdp_umem *umem; /* out for query*/
			u16 queue_id; /* in for query */
			struct xdp_umem *umem;
			u16 queue_id;
		} xsk;
	};
};
@@ -1188,6 +1187,10 @@ struct dev_ifalias {
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on single physical port.
 *
 * int (*ndo_get_port_parent_id)(struct net_device *dev,
 *			       struct netdev_phys_item_id *ppid)
 *	Called to get the parent ID of the physical port of this device.
 *
 * void (*ndo_udp_tunnel_add)(struct net_device *dev,
 *			      struct udp_tunnel_info *ti);
 *	Called by UDP tunnel to notify a driver about the UDP port and socket
@@ -1412,6 +1415,8 @@ struct net_device_ops {
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_port_parent_id)(struct net_device *dev,
							  struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void			(*ndo_udp_tunnel_add)(struct net_device *dev,
@@ -1486,6 +1491,7 @@ struct net_device_ops {
 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
 * @IFF_FAILOVER: device is a failover master device
 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
@@ -1517,6 +1523,7 @@ enum netdev_priv_flags {
	IFF_NO_RX_HANDLER		= 1<<26,
	IFF_FAILOVER			= 1<<27,
	IFF_FAILOVER_SLAVE		= 1<<28,
	IFF_L3MDEV_RX_HANDLER		= 1<<29,
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
@@ -1547,6 +1554,7 @@ enum netdev_priv_flags {
#define IFF_NO_RX_HANDLER		IFF_NO_RX_HANDLER
#define IFF_FAILOVER			IFF_FAILOVER
#define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER

/**
 *	struct net_device - The DEVICE structure.
@@ -3651,6 +3659,9 @@ int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);
int dev_get_port_parent_id(struct net_device *dev,
			   struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -4552,6 +4563,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_MASTER;
@@ -25,23 +25,24 @@ struct nf_queue_entry;
 * if IPv6 is a module.
 */
struct nf_ipv6_ops {
#if IS_MODULE(CONFIG_IPV6)
	int (*chk_addr)(struct net *net, const struct in6_addr *addr,
			const struct net_device *dev, int strict);
	int (*route_me_harder)(struct net *net, struct sk_buff *skb);
	int (*dev_get_saddr)(struct net *net, const struct net_device *dev,
		       const struct in6_addr *daddr, unsigned int srcprefs,
		       struct in6_addr *saddr);
	int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
		     bool strict);
#endif
	void (*route_input)(struct sk_buff *skb);
	int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
			int (*output)(struct net *, struct sock *, struct sk_buff *));
	int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
		     bool strict);
	int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
};

#ifdef CONFIG_NETFILTER
int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
			unsigned int dataoff, u_int8_t protocol);

int ipv6_netfilter_init(void);
void ipv6_netfilter_fini(void);
#include <net/addrconf.h>

extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
@@ -49,6 +50,49 @@ static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
	return rcu_dereference(nf_ipv6_ops);
}

static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
				   const struct net_device *dev, int strict)
{
#if IS_MODULE(CONFIG_IPV6)
	const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();

	if (!v6_ops)
		return 1;

	return v6_ops->chk_addr(net, addr, dev, strict);
#else
	return ipv6_chk_addr(net, addr, dev, strict);
#endif
}

int __nf_ip6_route(struct net *net, struct dst_entry **dst,
		   struct flowi *fl, bool strict);

static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
			       struct flowi *fl, bool strict)
{
#if IS_MODULE(CONFIG_IPV6)
	const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();

	if (v6ops)
		return v6ops->route(net, dst, fl, strict);

	return -EHOSTUNREACH;
#endif
#if IS_BUILTIN(CONFIG_IPV6)
	return __nf_ip6_route(net, dst, fl, strict);
#else
	return -EHOSTUNREACH;
#endif
}

int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
			unsigned int dataoff, u_int8_t protocol);

int ipv6_netfilter_init(void);
void ipv6_netfilter_fini(void);

#else /* CONFIG_NETFILTER */
static inline int ipv6_netfilter_init(void) { return 0; }
static inline void ipv6_netfilter_fini(void) { return; }
@@ -6,14 +6,19 @@

struct objagg_ops {
	size_t obj_size;
	bool (*delta_check)(void *priv, const void *parent_obj,
			    const void *obj);
	int (*hints_obj_cmp)(const void *obj1, const void *obj2);
	void * (*delta_create)(void *priv, void *parent_obj, void *obj);
	void (*delta_destroy)(void *priv, void *delta_priv);
	void * (*root_create)(void *priv, void *obj);
	void * (*root_create)(void *priv, void *obj, unsigned int root_id);
#define OBJAGG_OBJ_ROOT_ID_INVALID UINT_MAX
	void (*root_destroy)(void *priv, void *root_priv);
};

struct objagg;
struct objagg_obj;
struct objagg_hints;

const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj);
const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj);
@@ -21,7 +26,8 @@ const void *objagg_obj_raw(const struct objagg_obj *objagg_obj);

struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj);
void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj);
struct objagg *objagg_create(const struct objagg_ops *ops, void *priv);
struct objagg *objagg_create(const struct objagg_ops *ops,
			     struct objagg_hints *hints, void *priv);
void objagg_destroy(struct objagg *objagg);

struct objagg_obj_stats {
@@ -36,6 +42,7 @@ struct objagg_obj_stats_info {
};

struct objagg_stats {
	unsigned int root_count;
	unsigned int stats_info_count;
	struct objagg_obj_stats_info stats_info[];
};
@@ -43,4 +50,14 @@ struct objagg_stats {
const struct objagg_stats *objagg_stats_get(struct objagg *objagg);
void objagg_stats_put(const struct objagg_stats *objagg_stats);

enum objagg_opt_algo_type {
	OBJAGG_OPT_ALGO_SIMPLE_GREEDY,
};

struct objagg_hints *objagg_hints_get(struct objagg *objagg,
				      enum objagg_opt_algo_type opt_algo_type);
void objagg_hints_put(struct objagg_hints *objagg_hints);
const struct objagg_stats *
objagg_hints_stats_get(struct objagg_hints *objagg_hints);

#endif
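The hints API above lets a user compute an optimized aggregation layout from one objagg and seed a new one with it. A hedged lifecycle sketch, assuming (as with objagg_create()) that errors come back as ERR_PTR() values; the helper name is illustrative:

/* Sketch: derive hints from an existing objagg, build a new one with them. */
static struct objagg *my_rebuild(const struct objagg_ops *ops,
				 struct objagg *old, void *priv)
{
	struct objagg_hints *hints;
	struct objagg *fresh;

	hints = objagg_hints_get(old, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
	if (IS_ERR(hints))
		return ERR_CAST(hints);
	fresh = objagg_create(ops, hints, priv);
	objagg_hints_put(hints);
	return fresh;
}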
@@ -447,6 +447,11 @@ struct pmu {
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
};

enum perf_addr_filter_action_t {
@@ -502,6 +502,12 @@ struct phy_driver {
|
||||
*/
|
||||
int (*probe)(struct phy_device *phydev);
|
||||
|
||||
/*
|
||||
* Probe the hardware to determine what abilities it has.
|
||||
* Should only set phydev->supported.
|
||||
*/
|
||||
int (*get_features)(struct phy_device *phydev);
|
||||
|
||||
/* PHY Power Management */
|
||||
int (*suspend)(struct phy_device *phydev);
|
||||
int (*resume)(struct phy_device *phydev);
|
||||
@@ -667,13 +673,8 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
|
||||
bool exact);
|
||||
size_t phy_speeds(unsigned int *speeds, size_t size,
|
||||
unsigned long *mask);
|
||||
|
||||
static inline bool __phy_is_started(struct phy_device *phydev)
|
||||
{
|
||||
WARN_ON(!mutex_is_locked(&phydev->lock));
|
||||
|
||||
return phydev->state >= PHY_UP;
|
||||
}
|
||||
void of_set_phy_supported(struct phy_device *phydev);
|
||||
void of_set_phy_eee_broken(struct phy_device *phydev);
|
||||
|
||||
/**
|
||||
* phy_is_started - Convenience function to check whether PHY is started
|
||||
@@ -681,28 +682,11 @@ static inline bool __phy_is_started(struct phy_device *phydev)
|
||||
*/
|
||||
static inline bool phy_is_started(struct phy_device *phydev)
|
||||
{
|
||||
bool started;
|
||||
|
||||
mutex_lock(&phydev->lock);
|
||||
started = __phy_is_started(phydev);
|
||||
mutex_unlock(&phydev->lock);
|
||||
|
||||
return started;
|
||||
return phydev->state >= PHY_UP;
|
||||
}
|
||||
|
||||
void phy_resolve_aneg_linkmode(struct phy_device *phydev);
|
||||
|
||||
/**
|
||||
* phy_read_mmd - Convenience function for reading a register
|
||||
* from an MMD on a given PHY.
|
||||
* @phydev: The phy_device struct
|
||||
* @devad: The MMD to read from
|
||||
* @regnum: The register on the MMD to read
|
||||
*
|
||||
* Same rules as for phy_read();
|
||||
*/
|
||||
int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
|
||||
|
||||
/**
|
||||
* phy_read - Convenience function for reading a given PHY register
|
||||
* @phydev: the phy_device struct
|
||||
@@ -758,9 +742,68 @@ static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val)
                               val);
}

/**
 * phy_read_mmd - Convenience function for reading a register
 * from an MMD on a given PHY.
 * @phydev: The phy_device struct
 * @devad: The MMD to read from
 * @regnum: The register on the MMD to read
 *
 * Same rules as for phy_read();
 */
int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);

/**
 * __phy_read_mmd - Convenience function for reading a register
 * from an MMD on a given PHY.
 * @phydev: The phy_device struct
 * @devad: The MMD to read from
 * @regnum: The register on the MMD to read
 *
 * Same rules as for __phy_read();
 */
int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);

/**
 * phy_write_mmd - Convenience function for writing a register
 * on an MMD on a given PHY.
 * @phydev: The phy_device struct
 * @devad: The MMD to write to
 * @regnum: The register on the MMD to write
 * @val: value to write to @regnum
 *
 * Same rules as for phy_write();
 */
int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);

/**
 * __phy_write_mmd - Convenience function for writing a register
 * on an MMD on a given PHY.
 * @phydev: The phy_device struct
 * @devad: The MMD to write to
 * @regnum: The register on the MMD to write
 * @val: value to write to @regnum
 *
 * Same rules as for __phy_write();
 */
int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);

int __phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask,
                         u16 set);
int phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask,
                       u16 set);
int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);

int __phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum,
                             u16 mask, u16 set);
int phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum,
                           u16 mask, u16 set);
int __phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
                     u16 mask, u16 set);
int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
                   u16 mask, u16 set);

/**
 * __phy_set_bits - Convenience function for setting bits in a PHY register
 * @phydev: the phy_device struct
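
A hedged sketch of the read-modify-write helpers above; MDIO_MMD_VEND1 comes
from <linux/mdio.h>, while MYPHY_CTRL and MYPHY_CTRL_EN are invented vendor
names:

#include <linux/mdio.h>
#include <linux/phy.h>

#define MYPHY_CTRL      0x8000          /* assumed vendor register */
#define MYPHY_CTRL_EN   BIT(0)          /* assumed enable bit */

static int myphy_set_enable(struct phy_device *phydev, bool on)
{
        /* Locked read-modify-write of one MMD register */
        return phy_modify_mmd(phydev, MDIO_MMD_VEND1, MYPHY_CTRL,
                              MYPHY_CTRL_EN, on ? MYPHY_CTRL_EN : 0);
}
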
@@ -810,6 +853,66 @@ static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val)
        return phy_modify(phydev, regnum, val, 0);
}

/**
 * __phy_set_bits_mmd - Convenience function for setting bits in a register
 * on MMD
 * @phydev: the phy_device struct
 * @devad: the MMD containing register to modify
 * @regnum: register number to modify
 * @val: bits to set
 *
 * The caller must have taken the MDIO bus lock.
 */
static inline int __phy_set_bits_mmd(struct phy_device *phydev, int devad,
                                     u32 regnum, u16 val)
{
        return __phy_modify_mmd(phydev, devad, regnum, 0, val);
}

/**
 * __phy_clear_bits_mmd - Convenience function for clearing bits in a register
 * on MMD
 * @phydev: the phy_device struct
 * @devad: the MMD containing register to modify
 * @regnum: register number to modify
 * @val: bits to clear
 *
 * The caller must have taken the MDIO bus lock.
 */
static inline int __phy_clear_bits_mmd(struct phy_device *phydev, int devad,
                                       u32 regnum, u16 val)
{
        return __phy_modify_mmd(phydev, devad, regnum, val, 0);
}

/**
 * phy_set_bits_mmd - Convenience function for setting bits in a register
 * on MMD
 * @phydev: the phy_device struct
 * @devad: the MMD containing register to modify
 * @regnum: register number to modify
 * @val: bits to set
 */
static inline int phy_set_bits_mmd(struct phy_device *phydev, int devad,
                                   u32 regnum, u16 val)
{
        return phy_modify_mmd(phydev, devad, regnum, 0, val);
}

/**
 * phy_clear_bits_mmd - Convenience function for clearing bits in a register
 * on MMD
 * @phydev: the phy_device struct
 * @devad: the MMD containing register to modify
 * @regnum: register number to modify
 * @val: bits to clear
 */
static inline int phy_clear_bits_mmd(struct phy_device *phydev, int devad,
                                     u32 regnum, u16 val)
{
        return phy_modify_mmd(phydev, devad, regnum, val, 0);
}

/**
 * phy_interrupt_is_valid - Convenience function for testing a given PHY irq
 * @phydev: the phy_device struct
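
The unlocked __ variants let a driver batch several updates under one bus
lock, per the "caller must have taken the MDIO bus lock" rule in the
kerneldoc. A sketch under that assumption (MYPHY_LED_CTRL is invented):

#define MYPHY_LED_CTRL  0x8010          /* invented vendor register */

static int myphy_setup_leds(struct phy_device *phydev)
{
        int ret;

        mutex_lock(&phydev->mdio.bus->mdio_lock);
        ret = __phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
                                 MYPHY_LED_CTRL, BIT(3));
        if (!ret)
                ret = __phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
                                           MYPHY_LED_CTRL, BIT(4));
        mutex_unlock(&phydev->mdio.bus->mdio_lock);

        return ret;
}
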
@@ -886,18 +989,6 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
        return phydev->is_pseudo_fixed_link;
}

/**
 * phy_write_mmd - Convenience function for writing a register
 * on an MMD on a given PHY.
 * @phydev: The phy_device struct
 * @devad: The MMD to read from
 * @regnum: The register on the MMD to read
 * @val: value to write to @regnum
 *
 * Same rules as for phy_write();
 */
int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);

int phy_save_page(struct phy_device *phydev);
int phy_select_page(struct phy_device *phydev, int page);
int phy_restore_page(struct phy_device *phydev, int oldpage, int ret);
@@ -986,6 +1077,7 @@ void phy_attached_info(struct phy_device *phydev);
int genphy_config_init(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
int genphy_config_eee_advert(struct phy_device *phydev);
int genphy_config_aneg(struct phy_device *phydev);
int genphy_aneg_done(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
@@ -1005,13 +1097,16 @@ int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,

/* Clause 45 PHY */
int genphy_c45_restart_aneg(struct phy_device *phydev);
int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart);
int genphy_c45_aneg_done(struct phy_device *phydev);
int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask);
int genphy_c45_read_link(struct phy_device *phydev);
int genphy_c45_read_lpa(struct phy_device *phydev);
int genphy_c45_read_pma(struct phy_device *phydev);
int genphy_c45_pma_setup_forced(struct phy_device *phydev);
int genphy_c45_an_config_aneg(struct phy_device *phydev);
int genphy_c45_an_disable_aneg(struct phy_device *phydev);
int genphy_c45_read_mdix(struct phy_device *phydev);
int genphy_c45_pma_read_abilities(struct phy_device *phydev);

/* The gen10g_* functions are the old Clause 45 stub */
int gen10g_config_aneg(struct phy_device *phydev);
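
A hedged sketch of how the Clause 45 helpers compose into a phy_driver; the
IDs are invented, and wiring genphy_c45_pma_read_abilities() into a
get_features hook is an assumption. Note that genphy_c45_read_link() lost its
mmd_mask argument, so it can be wrapped directly:

static int myphy_c45_read_status(struct phy_device *phydev)
{
        int ret;

        ret = genphy_c45_read_link(phydev);     /* updates phydev->link */
        if (ret < 0)
                return ret;

        return genphy_c45_read_lpa(phydev);     /* partner advertisement */
}

static struct phy_driver myphy_c45_driver = {
        .phy_id         = 0x12345680,           /* invented OUI/model */
        .phy_id_mask    = 0xfffffff0,
        .name           = "Example C45 10G PHY",
        .get_features   = genphy_c45_pma_read_abilities,
        .config_aneg    = genphy_c45_an_config_aneg,
        .aneg_done      = genphy_c45_aneg_done,
        .read_status    = myphy_c45_read_status,
};
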
@@ -19,6 +19,12 @@ extern int fixed_phy_add(unsigned int irq, int phy_id,
extern struct phy_device *fixed_phy_register(unsigned int irq,
                                             struct fixed_phy_status *status,
                                             struct device_node *np);

extern struct phy_device *
fixed_phy_register_with_gpiod(unsigned int irq,
                              struct fixed_phy_status *status,
                              struct gpio_desc *gpiod);

extern void fixed_phy_unregister(struct phy_device *phydev);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
                                     int (*link_update)(struct net_device *,
@@ -35,6 +41,15 @@ static inline struct phy_device *fixed_phy_register(unsigned int irq,
{
        return ERR_PTR(-ENODEV);
}

static inline struct phy_device *
fixed_phy_register_with_gpiod(unsigned int irq,
                              struct fixed_phy_status *status,
                              struct gpio_desc *gpiod)
{
        return ERR_PTR(-ENODEV);
}

static inline void fixed_phy_unregister(struct phy_device *phydev)
{
}
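
A minimal sketch of the new registration path, assuming a device with a
"link" GPIO (the lookup name and error handling are illustrative only):

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

static struct phy_device *mydrv_add_fixed_phy(struct device *dev)
{
        struct fixed_phy_status status = {
                .link   = 1,            /* driven by the GPIO at runtime */
                .speed  = 1000,
                .duplex = DUPLEX_FULL,
        };
        struct gpio_desc *link_gpio;

        link_gpio = devm_gpiod_get(dev, "link", GPIOD_IN);
        if (IS_ERR(link_gpio))
                return ERR_CAST(link_gpio);

        return fixed_phy_register_with_gpiod(PHY_POLL, &status, link_gpio);
}
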
@@ -220,6 +220,7 @@ void phylink_ethtool_get_pauseparam(struct phylink *,
int phylink_ethtool_set_pauseparam(struct phylink *,
                                   struct ethtool_pauseparam *);
int phylink_get_eee_err(struct phylink *);
int phylink_init_eee(struct phylink *, bool);
int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
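
A sketch of where phylink_init_eee() might slot into an ethtool set_eee
handler; struct mydrv_priv and the clock-stop choice are assumptions:

struct mydrv_priv {
        struct phylink *phylink;
};

static int mydrv_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
{
        struct mydrv_priv *priv = netdev_priv(ndev);

        if (eee->eee_enabled) {
                /* true: let the PHY stop its clock during LPI */
                int ret = phylink_init_eee(priv->phylink, true);

                if (ret)
                        return ret;
        }

        return phylink_ethtool_set_eee(priv->phylink, eee);
}
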
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)

static inline void pm_runtime_mark_last_busy(struct device *dev)
{
        WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get()));
        WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
}

static inline bool pm_runtime_is_irq_safe(struct device *dev)
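
The helper keeps its usual role in the autosuspend idiom; only the timestamp
source changes (ktime_get_mono_fast_ns() is also safe to call from NMI
context). A brief sketch of the common pattern:

static void mydrv_io_done(struct device *dev)
{
        pm_runtime_mark_last_busy(dev);         /* restart autosuspend timer */
        pm_runtime_put_autosuspend(dev);        /* may suspend after delay */
}
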
@@ -26,7 +26,6 @@
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <asm/errno.h>
#endif
@@ -643,6 +643,7 @@ struct qed_dev_info {
        u16 mtu;

        bool wol_support;
        bool smart_an;

        /* MBI version */
        u32 mbi_version;
@@ -995,7 +995,7 @@ struct task_struct {
        /* cg_list protected by css_set_lock and tsk->alloc_lock: */
        struct list_head cg_list;
#endif
#ifdef CONFIG_X86_RESCTRL
#ifdef CONFIG_X86_CPU_RESCTRL
        u32 closid;
        u32 rmid;
#endif
@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_HUGE_ZERO_PAGE      23      /* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP         24      /* disable THP for all VMAs */
#define MMF_OOM_VICTIM          25      /* mm is the oom victim */
#define MMF_OOM_REAP_QUEUED     26      /* mm was queued for oom_reaper */
#define MMF_DISABLE_THP_MASK    (1 << MMF_DISABLE_THP)

#define MMF_INIT_MASK           (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
@@ -392,7 +392,7 @@ extern bool unhandled_signal(struct task_struct *tsk, int sig);
#endif

#define siginmask(sig, mask) \
        ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
        ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))

#define SIG_KERNEL_ONLY_MASK (\
        rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))
@@ -1889,12 +1889,12 @@ static inline void __skb_queue_before(struct sk_buff_head *list,
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
                                    struct sk_buff *newsk)
{
        __skb_queue_after(list, (struct sk_buff *)list, newsk);
}
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);

/**
 * __skb_queue_tail - queue a buffer at the list tail
@@ -1906,12 +1906,12 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
                                    struct sk_buff *newsk)
{
        __skb_queue_before(list, (struct sk_buff *)list, newsk);
}
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
@@ -1938,7 +1938,6 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 * so must be used with appropriate locks held only. The head item is
 * returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
        struct sk_buff *skb = skb_peek(list);
@@ -1946,6 +1945,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
                __skb_unlink(skb, list);
        return skb;
}
struct sk_buff *skb_dequeue(struct sk_buff_head *list);

/**
 * __skb_dequeue_tail - remove from the tail of the queue
@@ -1955,7 +1955,6 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 * so must be used with appropriate locks held only. The tail item is
 * returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
        struct sk_buff *skb = skb_peek_tail(list);
@@ -1963,6 +1962,7 @@ static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
                __skb_unlink(skb, list);
        return skb;
}
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);


static inline bool skb_is_nonlinear(const struct sk_buff *skb)
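
For context, a sketch of the locking contract these moves document: the
double-underscore helpers assume the queue lock is already held, which is
essentially how skb_queue_purge() is built from __skb_dequeue():

static void mydrv_drain_queue(struct sk_buff_head *q)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        while ((skb = __skb_dequeue(q)) != NULL)
                kfree_skb(skb);
        spin_unlock_irqrestore(&q->lock, flags);
}
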
@@ -2439,7 +2439,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,

        if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
                skb_set_transport_header(skb, keys.control.thoff);
        else
        else if (offset_hint >= 0)
                skb_set_transport_header(skb, offset_hint);
}
@@ -2653,13 +2653,13 @@ static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
 * the list and one reference dropped. This function does not take the
 * list lock and the caller must hold the relevant locks to use it.
 */
void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;
        while ((skb = __skb_dequeue(list)) != NULL)
                kfree_skb(skb);
}
void skb_queue_purge(struct sk_buff_head *list);

unsigned int skb_rbtree_purge(struct rb_root *root);
@@ -3028,7 +3028,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
}

/**
 * skb_put_padto - increase size and pad an skbuff up to a minimal size
 * __skb_put_padto - increase size and pad an skbuff up to a minimal size
 * @skb: buffer to pad
 * @len: minimal length
 * @free_on_error: free buffer on error
@@ -4235,6 +4235,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
        return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}

static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
{
        return skb_is_gso(skb) &&
               skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
}

static inline void skb_gso_reset(struct sk_buff *skb)
{
        skb_shinfo(skb)->gso_size = 0;
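
A hedged sketch of the intended use of the new helper: a path that can only
segment TCP rejects any other GSO type up front (mydrv_* is invented):

static int mydrv_prep_gso(struct sk_buff *skb)
{
        if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
                return -EINVAL;         /* e.g. UDP/SCTP GSO unsupported */

        /* ... set up TCPv4/TCPv6 segmentation offload ... */
        return 0;
}
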
@@ -184,6 +184,7 @@ struct plat_stmmacenet_data {
        struct clk *pclk;
        struct clk *clk_ptp_ref;
        unsigned int clk_ptp_rate;
        unsigned int clk_ref_rate;
        struct reset_control *stmmac_rst;
        struct stmmac_axi *axi;
        int has_gmac4;
@@ -57,6 +57,15 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,

                if (!skb_partial_csum_set(skb, start, off))
                        return -EINVAL;
        } else {
                /* gso packets without NEEDS_CSUM do not set transport_offset.
                 * probe and drop if it does not match one of the above types.
                 */
                if (gso_type) {
                        skb_probe_transport_header(skb, -1);
                        if (!skb_transport_header_was_set(skb))
                                return -EINVAL;
                }
        }

        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
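
The net effect: a header that claims GSO but not NEEDS_CSUM must now carry a
parsable transport header, since the -1 hint (see the
skb_probe_transport_header() hunk above) removes the blind fallback. A small
sketch, assuming little-endian virtio and an invented wrapper:

#include <linux/virtio_net.h>

static bool mydrv_hdr_ok(struct sk_buff *skb,
                         const struct virtio_net_hdr *hdr)
{
        /* Returns 0 on success; -EINVAL if e.g. a GSO type without
         * VIRTIO_NET_HDR_F_NEEDS_CSUM fails the transport-header probe.
         */
        return virtio_net_hdr_to_skb(skb, hdr, true) == 0;
}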