Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from David Miller:

 1) Add WireGuard

 2) Add HE and TWT support to ath11k driver, from John Crispin.

 3) Add ESP in TCP encapsulation support, from Sabrina Dubroca.

 4) Add variable window congestion control to TIPC, from Jon Maloy.

 5) Add BCM84881 PHY driver, from Russell King.

 6) Start adding netlink support for ethtool operations, from Michal
    Kubecek.

 7) Add XDP drop and TX action support to ena driver, from Sameeh
    Jubran.

 8) Add new ipv4 route notifications so that mlxsw driver does not have
    to handle identical routes itself, from Ido Schimmel.

 9) Add BPF dynamic program extensions, from Alexei Starovoitov.

10) Support RX and TX timestamping in igc, from Vinicius Costa Gomes.

11) Add support for macsec HW offloading, from Antoine Tenart.

12) Add initial support for MPTCP protocol, from Christoph Paasch,
    Matthieu Baerts, Florian Westphal, Peter Krystad, and many others.

13) Add Octeontx2 PF support, from Sunil Goutham, Geetha sowjanya, Linu
    Cherian, and others.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1469 commits)
  net: phy: add default ARCH_BCM_IPROC for MDIO_BCM_IPROC
  udp: segment looped gso packets correctly
  netem: change mailing list
  qed: FW 8.42.2.0 debug features
  qed: rt init valid initialization changed
  qed: Debug feature: ilt and mdump
  qed: FW 8.42.2.0 Add fw overlay feature
  qed: FW 8.42.2.0 HSI changes
  qed: FW 8.42.2.0 iscsi/fcoe changes
  qed: Add abstraction for different hsi values per chip
  qed: FW 8.42.2.0 Additional ll2 type
  qed: Use dmae to write to widebus registers in fw_funcs
  qed: FW 8.42.2.0 Parser offsets modified
  qed: FW 8.42.2.0 Queue Manager changes
  qed: FW 8.42.2.0 Expose new registers and change windows
  qed: FW 8.42.2.0 Internal ram offsets modifications
  MAINTAINERS: Add entry for Marvell OcteonTX2 Physical Function driver
  Documentation: net: octeontx2: Add RVU HW and drivers overview
  octeontx2-pf: ethtool RSS config support
  octeontx2-pf: Add basic ethtool support
  ...
@@ -21,8 +21,6 @@ config SFC
	depends on PCI
	select MDIO
	select CRC32
	select I2C
	select I2C_ALGOBIT
	imply PTP_1588_CLOCK
	---help---
	  This driver supports 10/40-gigabit Ethernet cards based on
@@ -1,7 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
-sfc-y			+= efx.o nic.o farch.o siena.o ef10.o tx.o rx.o \
-			   selftest.o ethtool.o ptp.o tx_tso.o \
-			   mcdi.o mcdi_port.o mcdi_mon.o
+sfc-y			+= efx.o efx_common.o efx_channels.o nic.o \
+			   farch.o siena.o ef10.o \
+			   tx.o tx_common.o tx_tso.o rx.o rx_common.o \
+			   selftest.o ethtool.o ethtool_common.o ptp.o \
+			   mcdi.o mcdi_port.o mcdi_port_common.o \
+			   mcdi_functions.o mcdi_filters.o mcdi_mon.o
 sfc-$(CONFIG_SFC_MTD)	+= mtd.o
 sfc-$(CONFIG_SFC_SRIOV)	+= sriov.o siena_sriov.o ef10_sriov.o
 
File diff suppressed because it is too large
@@ -522,10 +522,9 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
 
 	if (!is_zero_ether_addr(mac)) {
 		rc = efx_ef10_vport_add_mac(efx, vf->vport_id, mac);
-		if (rc) {
-			eth_zero_addr(vf->mac);
+		if (rc)
 			goto fail;
-		}
 
 		if (vf->efx)
 			ether_addr_copy(vf->efx->net_dev->dev_addr, mac);
 	}
File diff suppressed because it is too large
@@ -15,31 +15,17 @@ int efx_net_open(struct net_device *net_dev);
 int efx_net_stop(struct net_device *net_dev);
 
 /* TX */
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
 netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 				struct net_device *net_dev);
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
 		 void *type_data);
 unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
 extern unsigned int efx_piobuf_size;
 extern bool efx_separate_tx_channels;
 
 /* RX */
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
-				    struct efx_rss_context *ctx);
-void efx_rx_config_page_split(struct efx_nic *efx);
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
-void efx_rx_slow_fill(struct timer_list *t);
 void __efx_rx_packet(struct efx_channel *channel);
 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 		   unsigned int n_frags, unsigned int len, u16 flags);
@@ -48,7 +34,6 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
 	if (channel->rx_pkt_n_frags)
 		__efx_rx_packet(channel);
 }
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 
 #define EFX_MAX_DMAQ_SIZE 4096UL
 #define EFX_DEFAULT_DMAQ_SIZE 1024UL
@@ -80,8 +65,6 @@ static inline bool efx_rss_enabled(struct efx_nic *efx)
 
 /* Filters */
 
-void efx_mac_reconfigure(struct efx_nic *efx);
-
 /**
  * efx_filter_insert_filter - add or replace a filter
  * @efx: NIC in which to insert the filter
@@ -186,58 +169,17 @@ static inline void efx_filter_rfs_expire(struct work_struct *data)
 static inline void efx_filter_rfs_expire(struct work_struct *data) {}
 #define efx_filter_rfs_enabled() 0
 #endif
 bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
 
-bool efx_filter_spec_equal(const struct efx_filter_spec *left,
-			   const struct efx_filter_spec *right);
-u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
-
-#ifdef CONFIG_RFS_ACCEL
-bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
-			bool *force);
-
-struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
-					const struct efx_filter_spec *spec);
-
-/* @new is written to indicate if entry was newly added (true) or if an old
- * entry was found and returned (false).
- */
-struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
-				       const struct efx_filter_spec *spec,
-				       bool *new);
-
-void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
-#endif
-
 /* RSS contexts */
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
-void efx_free_rss_context_entry(struct efx_rss_context *ctx);
 static inline bool efx_rss_active(struct efx_rss_context *ctx)
 {
-	return ctx->context_id != EFX_EF10_RSS_CONTEXT_INVALID;
+	return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
 }
 
-/* Channels */
-int efx_channel_dummy_op_int(struct efx_channel *channel);
-void efx_channel_dummy_op_void(struct efx_channel *channel);
-int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
-
-/* Ports */
-int efx_reconfigure_port(struct efx_nic *efx);
-int __efx_reconfigure_port(struct efx_nic *efx);
-
 /* Ethtool support */
 extern const struct ethtool_ops efx_ethtool_ops;
 
-/* Reset handling */
-int efx_reset(struct efx_nic *efx, enum reset_type method);
-void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-int efx_try_recovery(struct efx_nic *efx);
-
 /* Global */
-void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
 unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
 unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
 int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
@@ -245,8 +187,6 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
 			    bool rx_may_override_tx);
 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
 			    unsigned int *rx_usecs, bool *rx_adaptive);
-void efx_stop_eventq(struct efx_channel *channel);
-void efx_start_eventq(struct efx_channel *channel);
 
 /* Dummy PHY ops for PHY drivers */
 int efx_port_dummy_op_int(struct efx_nic *efx);
@@ -293,9 +233,6 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
 	efx_schedule_channel(channel);
 }
 
-void efx_link_status_changed(struct efx_nic *efx);
 void efx_link_set_advertising(struct efx_nic *efx,
 			      const unsigned long *advertising);
 void efx_link_clear_advertising(struct efx_nic *efx);
 void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
1234	drivers/net/ethernet/sfc/efx_channels.c (new file)
File diff suppressed because it is too large
55	drivers/net/ethernet/sfc/efx_channels.h (new file)
@@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#ifndef EFX_CHANNELS_H
#define EFX_CHANNELS_H

int efx_probe_interrupts(struct efx_nic *efx);
void efx_remove_interrupts(struct efx_nic *efx);
int efx_soft_enable_interrupts(struct efx_nic *efx);
void efx_soft_disable_interrupts(struct efx_nic *efx);
int efx_enable_interrupts(struct efx_nic *efx);
void efx_disable_interrupts(struct efx_nic *efx);

void efx_set_interrupt_affinity(struct efx_nic *efx);
void efx_clear_interrupt_affinity(struct efx_nic *efx);

int efx_probe_eventq(struct efx_channel *channel);
int efx_init_eventq(struct efx_channel *channel);
void efx_start_eventq(struct efx_channel *channel);
void efx_stop_eventq(struct efx_channel *channel);
void efx_fini_eventq(struct efx_channel *channel);
void efx_remove_eventq(struct efx_channel *channel);

struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel);
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len);
void efx_set_channel_names(struct efx_nic *efx);
int efx_init_channels(struct efx_nic *efx);
int efx_probe_channels(struct efx_nic *efx);
int efx_set_channels(struct efx_nic *efx);
bool efx_default_channel_want_txqs(struct efx_channel *channel);
void efx_remove_channel(struct efx_channel *channel);
void efx_remove_channels(struct efx_nic *efx);
void efx_fini_channels(struct efx_nic *efx);
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
void efx_start_channels(struct efx_nic *efx);
void efx_stop_channels(struct efx_nic *efx);

void efx_init_napi_channel(struct efx_channel *channel);
void efx_init_napi(struct efx_nic *efx);
void efx_fini_napi_channel(struct efx_channel *channel);
void efx_fini_napi(struct efx_nic *efx);

int efx_channel_dummy_op_int(struct efx_channel *channel);
void efx_channel_dummy_op_void(struct efx_channel *channel);

#endif
1102	drivers/net/ethernet/sfc/efx_common.c (new file)
File diff suppressed because it is too large
73	drivers/net/ethernet/sfc/efx_common.h (new file)
@@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#ifndef EFX_COMMON_H
#define EFX_COMMON_H

int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
		unsigned int mem_map_size);
void efx_fini_io(struct efx_nic *efx, int bar);
int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
		    struct net_device *net_dev);
void efx_fini_struct(struct efx_nic *efx);

void efx_start_all(struct efx_nic *efx);
void efx_stop_all(struct efx_nic *efx);

void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats);

int efx_create_reset_workqueue(void);
void efx_queue_reset_work(struct efx_nic *efx);
void efx_flush_reset_workqueue(struct efx_nic *efx);
void efx_destroy_reset_workqueue(void);

void efx_start_monitor(struct efx_nic *efx);

int __efx_reconfigure_port(struct efx_nic *efx);
int efx_reconfigure_port(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

int efx_try_recovery(struct efx_nic *efx);
void efx_reset_down(struct efx_nic *efx, enum reset_type method);
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
int efx_reset(struct efx_nic *efx, enum reset_type method);
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);

static inline int efx_check_disabled(struct efx_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}

#ifdef CONFIG_SFC_MCDI_LOGGING
void efx_init_mcdi_logging(struct efx_nic *efx);
void efx_fini_mcdi_logging(struct efx_nic *efx);
#else
static inline void efx_init_mcdi_logging(struct efx_nic *efx) {}
static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {}
#endif

void efx_mac_reconfigure(struct efx_nic *efx);
void efx_link_status_changed(struct efx_nic *efx);
unsigned int efx_xdp_max_mtu(struct efx_nic *efx);
int efx_change_mtu(struct net_device *net_dev, int new_mtu);

#endif
@@ -13,92 +13,13 @@
 #include "workarounds.h"
 #include "selftest.h"
 #include "efx.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "ethtool_common.h"
 #include "filter.h"
 #include "nic.h"
 
-struct efx_sw_stat_desc {
-	const char *name;
-	enum {
-		EFX_ETHTOOL_STAT_SOURCE_nic,
-		EFX_ETHTOOL_STAT_SOURCE_channel,
-		EFX_ETHTOOL_STAT_SOURCE_tx_queue
-	} source;
-	unsigned offset;
-	u64(*get_stat) (void *field); /* Reader function */
-};
-
-/* Initialiser for a struct efx_sw_stat_desc with type-checking */
-#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
-			 get_stat_function) {			\
-	.name = #stat_name,					\
-	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name,	\
-	.offset = ((((field_type *) 0) ==			\
-		      &((struct efx_##source_name *)0)->field) ?	\
-		    offsetof(struct efx_##source_name, field) :		\
-		    offsetof(struct efx_##source_name, field)),		\
-	.get_stat = get_stat_function,				\
-}
-
-static u64 efx_get_uint_stat(void *field)
-{
-	return *(unsigned int *)field;
-}
-
-static u64 efx_get_atomic_stat(void *field)
-{
-	return atomic_read((atomic_t *) field);
-}
-
-#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field)		\
-	EFX_ETHTOOL_STAT(field, nic, field,			\
-			 atomic_t, efx_get_atomic_stat)
-
-#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field)			\
-	EFX_ETHTOOL_STAT(field, channel, n_##field,		\
-			 unsigned int, efx_get_uint_stat)
-#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field)		\
-	EFX_ETHTOOL_STAT(field, channel, field,			\
-			 unsigned int, efx_get_uint_stat)
-
-#define EFX_ETHTOOL_UINT_TXQ_STAT(field)			\
-	EFX_ETHTOOL_STAT(tx_##field, tx_queue, field,		\
-			 unsigned int, efx_get_uint_stat)
-
-static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
-	EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
-	EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
-	EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
-	EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
-	EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
-	EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
-	EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
-	EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
-	EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
-#ifdef CONFIG_RFS_ACCEL
-	EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
-	EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
-#endif
-};
-
-#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
-
 #define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
 
 /**************************************************************************
@@ -185,18 +106,6 @@ efx_ethtool_set_link_ksettings(struct net_device *net_dev,
 	return rc;
 }
 
-static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
-				    struct ethtool_drvinfo *info)
-{
-	struct efx_nic *efx = netdev_priv(net_dev);
-
-	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
-	strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
-	efx_mcdi_print_fwver(efx, info->fw_version,
-			     sizeof(info->fw_version));
-	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
-}
-
 static int efx_ethtool_get_regs_len(struct net_device *net_dev)
 {
 	return efx_nic_get_regs_len(netdev_priv(net_dev));
@@ -211,341 +120,6 @@ static void efx_ethtool_get_regs(struct net_device *net_dev,
 	efx_nic_get_regs(efx, buf);
 }
 
-static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
-{
-	struct efx_nic *efx = netdev_priv(net_dev);
-	return efx->msg_enable;
-}
-
-static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
-{
-	struct efx_nic *efx = netdev_priv(net_dev);
-	efx->msg_enable = msg_enable;
-}
-
-/**
- * efx_fill_test - fill in an individual self-test entry
- * @test_index: Index of the test
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- * @test: Pointer to test result (used only if data != %NULL)
- * @unit_format: Unit name format (e.g. "chan\%d")
- * @unit_id: Unit id (e.g. 0 for "chan0")
- * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
- * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
- *
- * Fill in an individual self-test entry.
- */
-static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
-			  int *test, const char *unit_format, int unit_id,
-			  const char *test_format, const char *test_id)
-{
-	char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
-
-	/* Fill data value, if applicable */
-	if (data)
-		data[test_index] = *test;
-
-	/* Fill string, if applicable */
-	if (strings) {
-		if (strchr(unit_format, '%'))
-			snprintf(unit_str, sizeof(unit_str),
-				 unit_format, unit_id);
-		else
-			strcpy(unit_str, unit_format);
-		snprintf(test_str, sizeof(test_str), test_format, test_id);
-		snprintf(strings + test_index * ETH_GSTRING_LEN,
-			 ETH_GSTRING_LEN,
-			 "%-6s %-24s", unit_str, test_str);
-	}
-}
-
-#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
-#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
-#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
-#define EFX_LOOPBACK_NAME(_mode, _counter)			\
-	"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
-
-/**
- * efx_fill_loopback_test - fill in a block of loopback self-test entries
- * @efx: Efx NIC
- * @lb_tests: Efx loopback self-test results structure
- * @mode: Loopback test mode
- * @test_index: Starting index of the test
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- *
- * Fill in a block of loopback self-test entries.  Return new test
- * index.
- */
-static int efx_fill_loopback_test(struct efx_nic *efx,
-				  struct efx_loopback_self_tests *lb_tests,
-				  enum efx_loopback_mode mode,
-				  unsigned int test_index,
-				  u8 *strings, u64 *data)
-{
-	struct efx_channel *channel =
-		efx_get_channel(efx, efx->tx_channel_offset);
-	struct efx_tx_queue *tx_queue;
-
-	efx_for_each_channel_tx_queue(tx_queue, channel) {
-		efx_fill_test(test_index++, strings, data,
-			      &lb_tests->tx_sent[tx_queue->queue],
-			      EFX_TX_QUEUE_NAME(tx_queue),
-			      EFX_LOOPBACK_NAME(mode, "tx_sent"));
-		efx_fill_test(test_index++, strings, data,
-			      &lb_tests->tx_done[tx_queue->queue],
-			      EFX_TX_QUEUE_NAME(tx_queue),
-			      EFX_LOOPBACK_NAME(mode, "tx_done"));
-	}
-	efx_fill_test(test_index++, strings, data,
-		      &lb_tests->rx_good,
-		      "rx", 0,
-		      EFX_LOOPBACK_NAME(mode, "rx_good"));
-	efx_fill_test(test_index++, strings, data,
-		      &lb_tests->rx_bad,
-		      "rx", 0,
-		      EFX_LOOPBACK_NAME(mode, "rx_bad"));
-
-	return test_index;
-}
-
-/**
- * efx_ethtool_fill_self_tests - get self-test details
- * @efx: Efx NIC
- * @tests: Efx self-test results structure, or %NULL
- * @strings: Ethtool strings, or %NULL
- * @data: Ethtool test results, or %NULL
- *
- * Get self-test number of strings, strings, and/or test results.
- * Return number of strings (== number of test results).
- *
- * The reason for merging these three functions is to make sure that
- * they can never be inconsistent.
- */
-static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
-				       struct efx_self_tests *tests,
-				       u8 *strings, u64 *data)
-{
-	struct efx_channel *channel;
-	unsigned int n = 0, i;
-	enum efx_loopback_mode mode;
-
-	efx_fill_test(n++, strings, data, &tests->phy_alive,
-		      "phy", 0, "alive", NULL);
-	efx_fill_test(n++, strings, data, &tests->nvram,
-		      "core", 0, "nvram", NULL);
-	efx_fill_test(n++, strings, data, &tests->interrupt,
-		      "core", 0, "interrupt", NULL);
-
-	/* Event queues */
-	efx_for_each_channel(channel, efx) {
-		efx_fill_test(n++, strings, data,
-			      &tests->eventq_dma[channel->channel],
-			      EFX_CHANNEL_NAME(channel),
-			      "eventq.dma", NULL);
-		efx_fill_test(n++, strings, data,
-			      &tests->eventq_int[channel->channel],
-			      EFX_CHANNEL_NAME(channel),
-			      "eventq.int", NULL);
-	}
-
-	efx_fill_test(n++, strings, data, &tests->memory,
-		      "core", 0, "memory", NULL);
-	efx_fill_test(n++, strings, data, &tests->registers,
-		      "core", 0, "registers", NULL);
-
-	if (efx->phy_op->run_tests != NULL) {
-		EFX_WARN_ON_PARANOID(efx->phy_op->test_name == NULL);
-
-		for (i = 0; true; ++i) {
-			const char *name;
-
-			EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
-			name = efx->phy_op->test_name(efx, i);
-			if (name == NULL)
-				break;
-
-			efx_fill_test(n++, strings, data, &tests->phy_ext[i],
-				      "phy", 0, name, NULL);
-		}
-	}
-
-	/* Loopback tests */
-	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
-		if (!(efx->loopback_modes & (1 << mode)))
-			continue;
-		n = efx_fill_loopback_test(efx,
-					   &tests->loopback[mode], mode, n,
-					   strings, data);
-	}
-
-	return n;
-}
-
-static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
-{
-	size_t n_stats = 0;
-	struct efx_channel *channel;
-
-	efx_for_each_channel(channel, efx) {
-		if (efx_channel_has_tx_queues(channel)) {
-			n_stats++;
-			if (strings != NULL) {
-				snprintf(strings, ETH_GSTRING_LEN,
-					 "tx-%u.tx_packets",
-					 channel->tx_queue[0].queue /
-					 EFX_TXQ_TYPES);
-
-				strings += ETH_GSTRING_LEN;
-			}
-		}
-	}
-	efx_for_each_channel(channel, efx) {
-		if (efx_channel_has_rx_queue(channel)) {
-			n_stats++;
-			if (strings != NULL) {
-				snprintf(strings, ETH_GSTRING_LEN,
-					 "rx-%d.rx_packets", channel->channel);
-				strings += ETH_GSTRING_LEN;
-			}
-		}
-	}
-	if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
-		unsigned short xdp;
-
-		for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
-			n_stats++;
-			if (strings) {
-				snprintf(strings, ETH_GSTRING_LEN,
-					 "tx-xdp-cpu-%hu.tx_packets", xdp);
-				strings += ETH_GSTRING_LEN;
-			}
-		}
-	}
-
-	return n_stats;
-}
-
-static int efx_ethtool_get_sset_count(struct net_device *net_dev,
-				      int string_set)
-{
-	struct efx_nic *efx = netdev_priv(net_dev);
-
-	switch (string_set) {
-	case ETH_SS_STATS:
-		return efx->type->describe_stats(efx, NULL) +
-		       EFX_ETHTOOL_SW_STAT_COUNT +
-		       efx_describe_per_queue_stats(efx, NULL) +
-		       efx_ptp_describe_stats(efx, NULL);
-	case ETH_SS_TEST:
-		return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
-	default:
-		return -EINVAL;
-	}
-}
-
-static void efx_ethtool_get_strings(struct net_device *net_dev,
-				    u32 string_set, u8 *strings)
-{
-	struct efx_nic *efx = netdev_priv(net_dev);
-	int i;
-
-	switch (string_set) {
-	case ETH_SS_STATS:
-		strings += (efx->type->describe_stats(efx, strings) *
-			    ETH_GSTRING_LEN);
-		for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
-			strlcpy(strings + i * ETH_GSTRING_LEN,
-				efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
-		strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
-		strings += (efx_describe_per_queue_stats(efx, strings) *
-			    ETH_GSTRING_LEN);
-		efx_ptp_describe_stats(efx, strings);
-		break;
-	case ETH_SS_TEST:
-		efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
-		break;
-	default:
-		/* No other string sets */
-		break;
-	}
-}
-
-static void efx_ethtool_get_stats(struct net_device *net_dev,
-				  struct ethtool_stats *stats,
-				  u64 *data)
-{
-	struct efx_nic *efx = netdev_priv(net_dev);
-	const struct efx_sw_stat_desc *stat;
-	struct efx_channel *channel;
-	struct efx_tx_queue *tx_queue;
-	struct efx_rx_queue *rx_queue;
-	int i;
-
-	spin_lock_bh(&efx->stats_lock);
-
-	/* Get NIC statistics */
-	data += efx->type->update_stats(efx, data, NULL);
-
-	/* Get software statistics */
-	for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
-		stat = &efx_sw_stat_desc[i];
-		switch (stat->source) {
-		case EFX_ETHTOOL_STAT_SOURCE_nic:
-			data[i] = stat->get_stat((void *)efx + stat->offset);
-			break;
-		case EFX_ETHTOOL_STAT_SOURCE_channel:
-			data[i] = 0;
-			efx_for_each_channel(channel, efx)
-				data[i] += stat->get_stat((void *)channel +
-							  stat->offset);
-			break;
-		case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
-			data[i] = 0;
-			efx_for_each_channel(channel, efx) {
-				efx_for_each_channel_tx_queue(tx_queue, channel)
-					data[i] +=
-						stat->get_stat((void *)tx_queue
-							       + stat->offset);
-			}
-			break;
-		}
-	}
-	data += EFX_ETHTOOL_SW_STAT_COUNT;
-
-	spin_unlock_bh(&efx->stats_lock);
-
-	efx_for_each_channel(channel, efx) {
-		if (efx_channel_has_tx_queues(channel)) {
-			*data = 0;
-			efx_for_each_channel_tx_queue(tx_queue, channel) {
-				*data += tx_queue->tx_packets;
-			}
-			data++;
-		}
-	}
-	efx_for_each_channel(channel, efx) {
-		if (efx_channel_has_rx_queue(channel)) {
-			*data = 0;
-			efx_for_each_channel_rx_queue(rx_queue, channel) {
-				*data += rx_queue->rx_packets;
-			}
-			data++;
-		}
-	}
-	if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
-		int xdp;
-
-		for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
-			data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
-			data++;
-		}
-	}
-
-	efx_ptp_update_stats(efx, data);
-}
-
 static void efx_ethtool_self_test(struct net_device *net_dev,
 				  struct ethtool_test *test, u64 *data)
 {
@@ -787,16 +361,6 @@ out:
 	return rc;
 }
 
-static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
-				       struct ethtool_pauseparam *pause)
-{
-	struct efx_nic *efx = netdev_priv(net_dev);
-
-	pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
-	pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
-	pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
-}
-
 static void efx_ethtool_get_wol(struct net_device *net_dev,
 				struct ethtool_wolinfo *wol)
 {
@@ -1456,7 +1020,7 @@ static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
 		rc = -ENOMEM;
 		goto out_unlock;
 	}
-	ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
+	ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
 	/* Initialise indir table and key to defaults */
 	efx_set_default_rx_indir_table(efx, ctx);
 	netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
457	drivers/net/ethernet/sfc/ethtool_common.c (new file)
@@ -0,0 +1,457 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include "net_driver.h"
#include "mcdi.h"
#include "nic.h"
#include "selftest.h"
#include "ethtool_common.h"

struct efx_sw_stat_desc {
	const char *name;
	enum {
		EFX_ETHTOOL_STAT_SOURCE_nic,
		EFX_ETHTOOL_STAT_SOURCE_channel,
		EFX_ETHTOOL_STAT_SOURCE_tx_queue
	} source;
	unsigned int offset;
	u64 (*get_stat)(void *field); /* Reader function */
};

/* Initialiser for a struct efx_sw_stat_desc with type-checking */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
			 get_stat_function) {			\
	.name = #stat_name,					\
	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name,	\
	.offset = ((((field_type *) 0) ==			\
		      &((struct efx_##source_name *)0)->field) ?	\
		    offsetof(struct efx_##source_name, field) :		\
		    offsetof(struct efx_##source_name, field)),		\
	.get_stat = get_stat_function,				\
}

static u64 efx_get_uint_stat(void *field)
{
	return *(unsigned int *)field;
}

static u64 efx_get_atomic_stat(void *field)
{
	return atomic_read((atomic_t *) field);
}

#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field)		\
	EFX_ETHTOOL_STAT(field, nic, field,			\
			 atomic_t, efx_get_atomic_stat)

#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field)			\
	EFX_ETHTOOL_STAT(field, channel, n_##field,		\
			 unsigned int, efx_get_uint_stat)
#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field)		\
	EFX_ETHTOOL_STAT(field, channel, field,			\
			 unsigned int, efx_get_uint_stat)

#define EFX_ETHTOOL_UINT_TXQ_STAT(field)			\
	EFX_ETHTOOL_STAT(tx_##field, tx_queue, field,		\
			 unsigned int, efx_get_uint_stat)

static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
	EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
	EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
	EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
	EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
	EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
#ifdef CONFIG_RFS_ACCEL
	EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
#endif
};

#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)

void efx_ethtool_get_drvinfo(struct net_device *net_dev,
			     struct ethtool_drvinfo *info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
	efx_mcdi_print_fwver(efx, info->fw_version,
			     sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}

u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	return efx->msg_enable;
}

void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	efx->msg_enable = msg_enable;
}

void efx_ethtool_get_pauseparam(struct net_device *net_dev,
				struct ethtool_pauseparam *pause)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
	pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
	pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
}

/**
 * efx_fill_test - fill in an individual self-test entry
 * @test_index: Index of the test
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 * @test: Pointer to test result (used only if data != %NULL)
 * @unit_format: Unit name format (e.g. "chan\%d")
 * @unit_id: Unit id (e.g. 0 for "chan0")
 * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
 * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
 *
 * Fill in an individual self-test entry.
 */
static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
			  int *test, const char *unit_format, int unit_id,
			  const char *test_format, const char *test_id)
{
	char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];

	/* Fill data value, if applicable */
	if (data)
		data[test_index] = *test;

	/* Fill string, if applicable */
	if (strings) {
		if (strchr(unit_format, '%'))
			snprintf(unit_str, sizeof(unit_str),
				 unit_format, unit_id);
		else
			strcpy(unit_str, unit_format);
		snprintf(test_str, sizeof(test_str), test_format, test_id);
		snprintf(strings + test_index * ETH_GSTRING_LEN,
			 ETH_GSTRING_LEN,
			 "%-6s %-24s", unit_str, test_str);
	}
}

#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
#define EFX_LOOPBACK_NAME(_mode, _counter)			\
	"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)

/**
 * efx_fill_loopback_test - fill in a block of loopback self-test entries
 * @efx: Efx NIC
 * @lb_tests: Efx loopback self-test results structure
 * @mode: Loopback test mode
 * @test_index: Starting index of the test
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 *
 * Fill in a block of loopback self-test entries.  Return new test
 * index.
 */
static int efx_fill_loopback_test(struct efx_nic *efx,
				  struct efx_loopback_self_tests *lb_tests,
				  enum efx_loopback_mode mode,
				  unsigned int test_index,
				  u8 *strings, u64 *data)
{
	struct efx_channel *channel =
		efx_get_channel(efx, efx->tx_channel_offset);
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_sent[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_sent"));
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_done[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_done"));
	}
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_good,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_good"));
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_bad,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_bad"));

	return test_index;
}

/**
 * efx_ethtool_fill_self_tests - get self-test details
 * @efx: Efx NIC
 * @tests: Efx self-test results structure, or %NULL
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 *
 * Get self-test number of strings, strings, and/or test results.
 * Return number of strings (== number of test results).
 *
 * The reason for merging these three functions is to make sure that
 * they can never be inconsistent.
 */
int efx_ethtool_fill_self_tests(struct efx_nic *efx,
				struct efx_self_tests *tests,
				u8 *strings, u64 *data)
{
	struct efx_channel *channel;
	unsigned int n = 0, i;
	enum efx_loopback_mode mode;

	efx_fill_test(n++, strings, data, &tests->phy_alive,
		      "phy", 0, "alive", NULL);
	efx_fill_test(n++, strings, data, &tests->nvram,
		      "core", 0, "nvram", NULL);
	efx_fill_test(n++, strings, data, &tests->interrupt,
		      "core", 0, "interrupt", NULL);

	/* Event queues */
	efx_for_each_channel(channel, efx) {
		efx_fill_test(n++, strings, data,
			      &tests->eventq_dma[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.dma", NULL);
		efx_fill_test(n++, strings, data,
			      &tests->eventq_int[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.int", NULL);
	}

	efx_fill_test(n++, strings, data, &tests->memory,
		      "core", 0, "memory", NULL);
	efx_fill_test(n++, strings, data, &tests->registers,
		      "core", 0, "registers", NULL);

	if (efx->phy_op->run_tests != NULL) {
		EFX_WARN_ON_PARANOID(efx->phy_op->test_name == NULL);

		for (i = 0; true; ++i) {
			const char *name;

			EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
			name = efx->phy_op->test_name(efx, i);
			if (name == NULL)
				break;

			efx_fill_test(n++, strings, data, &tests->phy_ext[i],
				      "phy", 0, name, NULL);
		}
	}

	/* Loopback tests */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(efx->loopback_modes & (1 << mode)))
			continue;
		n = efx_fill_loopback_test(efx,
					   &tests->loopback[mode], mode, n,
					   strings, data);
	}

	return n;
}

static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
{
	size_t n_stats = 0;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_tx_queues(channel)) {
			n_stats++;
			if (strings != NULL) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "tx-%u.tx_packets",
					 channel->tx_queue[0].queue /
					 EFX_TXQ_TYPES);

				strings += ETH_GSTRING_LEN;
			}
		}
	}
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel)) {
			n_stats++;
			if (strings != NULL) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "rx-%d.rx_packets", channel->channel);
				strings += ETH_GSTRING_LEN;
			}
		}
	}
	if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
		unsigned short xdp;

		for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
			n_stats++;
			if (strings) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "tx-xdp-cpu-%hu.tx_packets", xdp);
				strings += ETH_GSTRING_LEN;
			}
		}
	}

	return n_stats;
}

int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	switch (string_set) {
	case ETH_SS_STATS:
		return efx->type->describe_stats(efx, NULL) +
		       EFX_ETHTOOL_SW_STAT_COUNT +
		       efx_describe_per_queue_stats(efx, NULL) +
		       efx_ptp_describe_stats(efx, NULL);
	case ETH_SS_TEST:
		return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
	default:
		return -EINVAL;
	}
}

void efx_ethtool_get_strings(struct net_device *net_dev,
			     u32 string_set, u8 *strings)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int i;

	switch (string_set) {
	case ETH_SS_STATS:
		strings += (efx->type->describe_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
			strlcpy(strings + i * ETH_GSTRING_LEN,
				efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
		strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
		strings += (efx_describe_per_queue_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		efx_ptp_describe_stats(efx, strings);
		break;
	case ETH_SS_TEST:
		efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
		break;
	default:
		/* No other string sets */
		break;
	}
}

void efx_ethtool_get_stats(struct net_device *net_dev,
			   struct ethtool_stats *stats,
			   u64 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	const struct efx_sw_stat_desc *stat;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	spin_lock_bh(&efx->stats_lock);

	/* Get NIC statistics */
	data += efx->type->update_stats(efx, data, NULL);

	/* Get software statistics */
	for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
		stat = &efx_sw_stat_desc[i];
		switch (stat->source) {
		case EFX_ETHTOOL_STAT_SOURCE_nic:
			data[i] = stat->get_stat((void *)efx + stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_channel:
			data[i] = 0;
			efx_for_each_channel(channel, efx)
				data[i] += stat->get_stat((void *)channel +
							  stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
			data[i] = 0;
			efx_for_each_channel(channel, efx) {
				efx_for_each_channel_tx_queue(tx_queue, channel)
					data[i] +=
						stat->get_stat((void *)tx_queue
							       + stat->offset);
			}
			break;
		}
	}
	data += EFX_ETHTOOL_SW_STAT_COUNT;

	spin_unlock_bh(&efx->stats_lock);

	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_tx_queues(channel)) {
			*data = 0;
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				*data += tx_queue->tx_packets;
			}
			data++;
		}
	}
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel)) {
			*data = 0;
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				*data += rx_queue->rx_packets;
			}
			data++;
		}
	}
	if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
		int xdp;

		for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
			data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
			data++;
		}
	}

	efx_ptp_update_stats(efx, data);
}
30	drivers/net/ethernet/sfc/ethtool_common.h (new file)
@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#ifndef EFX_ETHTOOL_COMMON_H
#define EFX_ETHTOOL_COMMON_H

void efx_ethtool_get_drvinfo(struct net_device *net_dev,
			     struct ethtool_drvinfo *info);
u32 efx_ethtool_get_msglevel(struct net_device *net_dev);
void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable);
void efx_ethtool_get_pauseparam(struct net_device *net_dev,
				struct ethtool_pauseparam *pause);
int efx_ethtool_fill_self_tests(struct efx_nic *efx,
				struct efx_self_tests *tests,
				u8 *strings, u64 *data);
int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
			     u8 *strings);
void efx_ethtool_get_stats(struct net_device *net_dev,
			   struct ethtool_stats *stats __attribute__ ((unused)),
			   u64 *data);

#endif
@@ -2108,7 +2108,7 @@ static void ef4_net_stats(struct net_device *net_dev,
 }
 
 /* Context: netif_tx_lock held, BHs disabled. */
-static void ef4_watchdog(struct net_device *net_dev)
+static void ef4_watchdog(struct net_device *net_dev, unsigned int txqueue)
 {
 	struct ef4_nic *efx = netdev_priv(net_dev);
 
@@ -15,6 +15,7 @@
 #include "net_driver.h"
 #include "bitfield.h"
 #include "efx.h"
+#include "rx_common.h"
 #include "nic.h"
 #include "farch_regs.h"
 #include "sriov.h"
@@ -346,11 +346,8 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx);
int efx_mcdi_port_probe(struct efx_nic *efx);
void efx_mcdi_port_remove(struct efx_nic *efx);
int efx_mcdi_port_reconfigure(struct efx_nic *efx);
int efx_mcdi_port_get_number(struct efx_nic *efx);
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
int efx_mcdi_set_mac(struct efx_nic *efx);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
2270	drivers/net/ethernet/sfc/mcdi_filters.c (new file)
File diff suppressed because it is too large
159	drivers/net/ethernet/sfc/mcdi_filters.h (new file)
@@ -0,0 +1,159 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#ifndef EFX_MCDI_FILTERS_H
#define EFX_MCDI_FILTERS_H

#include "net_driver.h"
#include "filter.h"
#include "mcdi_pcol.h"

#define EFX_EF10_FILTER_DEV_UC_MAX	32
#define EFX_EF10_FILTER_DEV_MC_MAX	256

enum efx_mcdi_filter_default_filters {
	EFX_EF10_BCAST,
	EFX_EF10_UCDEF,
	EFX_EF10_MCDEF,
	EFX_EF10_VXLAN4_UCDEF,
	EFX_EF10_VXLAN4_MCDEF,
	EFX_EF10_VXLAN6_UCDEF,
	EFX_EF10_VXLAN6_MCDEF,
	EFX_EF10_NVGRE4_UCDEF,
	EFX_EF10_NVGRE4_MCDEF,
	EFX_EF10_NVGRE6_UCDEF,
	EFX_EF10_NVGRE6_MCDEF,
	EFX_EF10_GENEVE4_UCDEF,
	EFX_EF10_GENEVE4_MCDEF,
	EFX_EF10_GENEVE6_UCDEF,
	EFX_EF10_GENEVE6_MCDEF,

	EFX_EF10_NUM_DEFAULT_FILTERS
};

/* Per-VLAN filters information */
struct efx_mcdi_filter_vlan {
	struct list_head list;
	u16 vid;
	u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
	u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
	u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
};

struct efx_mcdi_dev_addr {
	u8 addr[ETH_ALEN];
};

struct efx_mcdi_filter_table {
/* The MCDI match masks supported by this fw & hw, in order of priority */
	u32 rx_match_mcdi_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
	unsigned int rx_match_count;

	struct rw_semaphore lock; /* Protects entries */
	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
/* unused flag	1UL */
#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
	/* Shadow of net_device address lists, guarded by mac_lock */
	struct efx_mcdi_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
	struct efx_mcdi_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
	int dev_uc_count;
	int dev_mc_count;
	bool uc_promisc;
	bool mc_promisc;
	/* Whether in multicast promiscuous mode when last changed */
	bool mc_promisc_last;
	bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
	bool vlan_filter;
	struct list_head vlan_list;
};

int efx_mcdi_filter_table_probe(struct efx_nic *efx);
void efx_mcdi_filter_table_remove(struct efx_nic *efx);
void efx_mcdi_filter_table_restore(struct efx_nic *efx);

/*
 * The filter table(s) are managed by firmware and we have write-only
 * access.  When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter.  Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define EFX_MCDI_FILTER_TBL_ROWS 8192

bool efx_mcdi_filter_match_supported(struct efx_mcdi_filter_table *table,
				     bool encap,
				     enum efx_filter_match_flags match_flags);

void efx_mcdi_filter_sync_rx_mode(struct efx_nic *efx);
s32 efx_mcdi_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
			   bool replace_equal);
int efx_mcdi_filter_remove_safe(struct efx_nic *efx,
				enum efx_filter_priority priority,
				u32 filter_id);
int efx_mcdi_filter_get_safe(struct efx_nic *efx,
			     enum efx_filter_priority priority,
			     u32 filter_id, struct efx_filter_spec *spec);

u32 efx_mcdi_filter_count_rx_used(struct efx_nic *efx,
				  enum efx_filter_priority priority);
int efx_mcdi_filter_clear_rx(struct efx_nic *efx,
			     enum efx_filter_priority priority);
u32 efx_mcdi_filter_get_rx_id_limit(struct efx_nic *efx);
s32 efx_mcdi_filter_get_rx_ids(struct efx_nic *efx,
			       enum efx_filter_priority priority,
			       u32 *buf, u32 size);

void efx_mcdi_filter_cleanup_vlans(struct efx_nic *efx);
int efx_mcdi_filter_add_vlan(struct efx_nic *efx, u16 vid);
struct efx_mcdi_filter_vlan *efx_mcdi_filter_find_vlan(struct efx_nic *efx, u16 vid);
void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid);

void efx_mcdi_rx_free_indir_table(struct efx_nic *efx);
int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
					struct efx_rss_context *ctx,
					const u32 *rx_indir_table,
					const u8 *key);
int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
				   const u32 *rx_indir_table,
				   const u8 *key);
int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
				   const u32 *rx_indir_table
				   __attribute__ ((unused)),
				   const u8 *key
				   __attribute__ ((unused)));
int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx);
int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
					struct efx_rss_context *ctx);
int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context,
				   u32 *flags);
void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
				    struct efx_rss_context *ctx);
void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx);

static inline void efx_mcdi_update_rx_scatter(struct efx_nic *efx)
{
	/* no need to do anything here */
}

bool efx_mcdi_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
				    unsigned int filter_idx);

#endif
drivers/net/ethernet/sfc/mcdi_functions.c (new file, 386 lines)
@@ -0,0 +1,386 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "mcdi_functions.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

int efx_mcdi_free_vis(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF_ERR(outbuf);
        size_t outlen;
        int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
                                    outbuf, sizeof(outbuf), &outlen);

        /* -EALREADY means nothing to free, so ignore */
        if (rc == -EALREADY)
                rc = 0;
        if (rc)
                efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
                                       rc);
        return rc;
}

int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
                       unsigned int max_vis, unsigned int *vi_base,
                       unsigned int *allocated_vis)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
        MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
        MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
        rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc != 0)
                return rc;

        if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
                return -EIO;

        netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
                  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

        if (vi_base)
                *vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
        if (allocated_vis)
                *allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
        return 0;
}

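/* A hedged usage sketch for the VI helpers above: a probe path might reserve
 * at least enough VIs for its channels while letting firmware cap the total,
 * and give a partial allocation back on failure.  Names prefixed "example_"
 * are illustrative assumptions, not driver code.
 */
static int example_probe_vis(struct efx_nic *efx, unsigned int n_channels)
{
        unsigned int vi_base, allocated;
        int rc;

        rc = efx_mcdi_alloc_vis(efx, n_channels, 2 * n_channels,
                                &vi_base, &allocated);
        if (rc)
                return rc;
        if (allocated < n_channels) {
                efx_mcdi_free_vis(efx); /* return the partial allocation */
                return -ENOSPC;
        }
        return 0;
}
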
int efx_mcdi_ev_probe(struct efx_channel *channel)
{
        return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
                                    (channel->eventq_mask + 1) *
                                    sizeof(efx_qword_t),
                                    GFP_KERNEL);
}

int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
{
        MCDI_DECLARE_BUF(inbuf,
                         MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
                                                   EFX_BUF_SIZE));
        MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
        size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
        struct efx_nic *efx = channel->efx;
        size_t inlen, outlen;
        dma_addr_t dma_addr;
        int rc, i;

        /* Fill event queue with all ones (i.e. empty events) */
        memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
        /* INIT_EVQ expects index in vector table, not absolute */
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
                       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
                       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);

        if (v2) {
                /* Use the new generic approach to specifying event queue
                 * configuration, requesting lower latency or higher throughput.
                 * The options that actually get used appear in the output.
                 */
                MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
                                      INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
                                      INIT_EVQ_V2_IN_FLAG_TYPE,
                                      MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
        } else {
                MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
                                      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
                                      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
                                      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
                                      INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru);
        }

        dma_addr = channel->eventq.buf.dma_addr;
        for (i = 0; i < entries; ++i) {
                MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
                dma_addr += EFX_BUF_SIZE;
        }

        inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);

        rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
                          outbuf, sizeof(outbuf), &outlen);

        if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
                netif_dbg(efx, drv, efx->net_dev,
                          "Channel %d using event queue flags %08x\n",
                          channel->channel,
                          MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));

        return rc;
}

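/* A hedged usage sketch for efx_mcdi_ev_init() above: a NIC-type ev_init
 * would typically select the v2 command when firmware advertises it, and
 * only request v1 cut-through as a workaround path.  The capability flag
 * below is an assumption for illustration, not the driver's exact logic.
 */
static int example_channel_ev_init(struct efx_channel *channel,
                                   bool fw_has_evq_v2, bool want_cut_thru)
{
        /* v2 lets firmware pick latency/throughput tuning (FLAG_TYPE_AUTO);
         * cut-through is only meaningful on the v1 path.
         */
        if (fw_has_evq_v2)
                return efx_mcdi_ev_init(channel, false, true);
        return efx_mcdi_ev_init(channel, want_cut_thru, false);
}
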
void efx_mcdi_ev_remove(struct efx_channel *channel)
{
        efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}

void efx_mcdi_ev_fini(struct efx_channel *channel)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
        MCDI_DECLARE_BUF_ERR(outbuf);
        struct efx_nic *efx = channel->efx;
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);

        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
                                outbuf, sizeof(outbuf), &outlen);

        if (rc && rc != -EALREADY)
                goto fail;

        return;

fail:
        efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
                               outbuf, outlen, rc);
}

int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
                                                       EFX_BUF_SIZE));
        bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
        size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
        struct efx_channel *channel = tx_queue->channel;
        struct efx_nic *efx = tx_queue->efx;
        struct efx_ef10_nic_data *nic_data;
        dma_addr_t dma_addr;
        size_t inlen;
        int rc, i;

        BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);

        nic_data = efx->nic_data;

        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);

        dma_addr = tx_queue->txd.buf.dma_addr;

        netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
                  tx_queue->queue, entries, (u64)dma_addr);

        for (i = 0; i < entries; ++i) {
                MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
                dma_addr += EFX_BUF_SIZE;
        }

        inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

        do {
                MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
                                      /* This flag was removed from mcdi_pcol.h for
                                       * the non-_EXT version of INIT_TXQ.  However,
                                       * firmware still honours it.
                                       */
                                      INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
                                      INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
                                      INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
                                      INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
                                      tx_queue->timestamping);

                rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
                                        NULL, 0, NULL);
                if (rc == -ENOSPC && tso_v2) {
                        /* Retry without TSOv2 if we're short on contexts. */
                        tso_v2 = false;
                        netif_warn(efx, probe, efx->net_dev,
                                   "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
                } else if (rc) {
                        efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
                                               MC_CMD_INIT_TXQ_EXT_IN_LEN,
                                               NULL, 0, rc);
                        goto fail;
                }
        } while (rc);

        return 0;

fail:
        return rc;
}

void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue)
{
        efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}

void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
        MCDI_DECLARE_BUF_ERR(outbuf);
        struct efx_nic *efx = tx_queue->efx;
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, tx_queue->queue);

        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
                                outbuf, sizeof(outbuf), &outlen);

        if (rc && rc != -EALREADY)
                goto fail;

        return;

fail:
        efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
                               outbuf, outlen, rc);
}

int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
{
        return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
                                    (rx_queue->ptr_mask + 1) *
                                    sizeof(efx_qword_t),
                                    GFP_KERNEL);
}

void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
{
        MCDI_DECLARE_BUF(inbuf,
                         MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
                                                EFX_BUF_SIZE));
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
        struct efx_nic *efx = rx_queue->efx;
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        dma_addr_t dma_addr;
        size_t inlen;
        int rc;
        int i;

        BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);

        rx_queue->scatter_n = 0;
        rx_queue->scatter_len = 0;

        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
                       efx_rx_queue_index(rx_queue));
        MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
                              INIT_RXQ_IN_FLAG_PREFIX, 1,
                              INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);

        dma_addr = rx_queue->rxd.buf.dma_addr;

        netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
                  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);

        for (i = 0; i < entries; ++i) {
                MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
                dma_addr += EFX_BUF_SIZE;
        }

        inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);

        rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
                          NULL, 0, NULL);
        if (rc)
                netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
                            efx_rx_queue_index(rx_queue));
}

void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue)
{
        efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}

void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
        MCDI_DECLARE_BUF_ERR(outbuf);
        struct efx_nic *efx = rx_queue->efx;
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
                       efx_rx_queue_index(rx_queue));

        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
                                outbuf, sizeof(outbuf), &outlen);

        if (rc && rc != -EALREADY)
                goto fail;

        return;

fail:
        efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
                               outbuf, outlen, rc);
}

int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode)
{
        switch (vi_window_mode) {
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
                efx->vi_stride = 8192;
                break;
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
                efx->vi_stride = 16384;
                break;
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
                efx->vi_stride = 65536;
                break;
        default:
                netif_err(efx, probe, efx->net_dev,
                          "Unrecognised VI window mode %d\n",
                          vi_window_mode);
                return -EIO;
        }
        netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
                  efx->vi_stride);
        return 0;
}

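/* Worked example for the mapping above: with the 8K window mode, VI n's
 * registers start at byte offset n * 8192 from the BAR, so a register at
 * offset 0x800 inside VI 3 lives at 3 * 8192 + 0x800 = 0x6800.  The helper
 * below is an illustrative assumption, not driver code.
 */
static inline resource_size_t example_vi_reg_offset(struct efx_nic *efx,
                                                    unsigned int vi,
                                                    unsigned int reg)
{
        return (resource_size_t)vi * efx->vi_stride + reg;
}
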
int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
        size_t outlen;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
                          sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < sizeof(outbuf))
                return -EIO;

        *pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
        return 0;
}
drivers/net/ethernet/sfc/mcdi_functions.h (new file, 32 lines)
@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#ifndef EFX_MCDI_FUNCTIONS_H
#define EFX_MCDI_FUNCTIONS_H

int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
                       unsigned int max_vis, unsigned int *vi_base,
                       unsigned int *allocated_vis);
int efx_mcdi_free_vis(struct efx_nic *efx);

int efx_mcdi_ev_probe(struct efx_channel *channel);
int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2);
void efx_mcdi_ev_remove(struct efx_channel *channel);
void efx_mcdi_ev_fini(struct efx_channel *channel);
int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2);
void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue);
void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue);
int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue);
void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue);
void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue);
void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue);
int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode);
int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index);

#endif
@@ -14,106 +14,7 @@
#include "mcdi_pcol.h"
#include "nic.h"
#include "selftest.h"

struct efx_mcdi_phy_data {
        u32 flags;
        u32 type;
        u32 supported_cap;
        u32 channel;
        u32 port;
        u32 stats_mask;
        u8 name[20];
        u32 media;
        u32 mmd_mask;
        u8 revision[20];
        u32 forced_cap;
};

static int
efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
        size_t outlen;
        int rc;

        BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
        BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                goto fail;

        if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
                rc = -EIO;
                goto fail;
        }

        cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
        cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
        cfg->supported_cap =
                MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
        cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
        cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
        cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
        memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
               sizeof(cfg->name));
        cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
        cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
        memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
               sizeof(cfg->revision));

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}

static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
                             u32 flags, u32 loopback_mode,
                             u32 loopback_speed)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
        int rc;

        BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);

        MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
        MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
        MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
        MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);

        rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
                          NULL, 0, NULL);
        return rc;
}

static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
        size_t outlen;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                goto fail;

        if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
                      MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
                rc = -EIO;
                goto fail;
        }

        *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}
#include "mcdi_port_common.h"

static int efx_mcdi_mdio_read(struct net_device *net_dev,
                              int prtad, int devad, u16 addr)
@@ -168,246 +69,6 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
        return 0;
}

static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
{
#define SET_BIT(name)   __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
                                  linkset)

        bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
        switch (media) {
        case MC_CMD_MEDIA_KX4:
                SET_BIT(Backplane);
                if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
                        SET_BIT(1000baseKX_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
                        SET_BIT(10000baseKX4_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
                        SET_BIT(40000baseKR4_Full);
                break;

        case MC_CMD_MEDIA_XFP:
        case MC_CMD_MEDIA_SFP_PLUS:
        case MC_CMD_MEDIA_QSFP_PLUS:
                SET_BIT(FIBRE);
                if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
                        SET_BIT(1000baseT_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
                        SET_BIT(10000baseT_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
                        SET_BIT(40000baseCR4_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
                        SET_BIT(100000baseCR4_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
                        SET_BIT(25000baseCR_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
                        SET_BIT(50000baseCR2_Full);
                break;

        case MC_CMD_MEDIA_BASE_T:
                SET_BIT(TP);
                if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
                        SET_BIT(10baseT_Half);
                if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
                        SET_BIT(10baseT_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
                        SET_BIT(100baseT_Half);
                if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
                        SET_BIT(100baseT_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
                        SET_BIT(1000baseT_Half);
                if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
                        SET_BIT(1000baseT_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
                        SET_BIT(10000baseT_Full);
                break;
        }

        if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
                SET_BIT(Pause);
        if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
                SET_BIT(Asym_Pause);
        if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
                SET_BIT(Autoneg);

#undef SET_BIT
}

static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
{
        u32 result = 0;

#define TEST_BIT(name)  test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
                                 linkset)

        if (TEST_BIT(10baseT_Half))
                result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
        if (TEST_BIT(10baseT_Full))
                result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
        if (TEST_BIT(100baseT_Half))
                result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
        if (TEST_BIT(100baseT_Full))
                result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
        if (TEST_BIT(1000baseT_Half))
                result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
        if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
                result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
        if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
                result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
        if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
                result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
        if (TEST_BIT(100000baseCR4_Full))
                result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
        if (TEST_BIT(25000baseCR_Full))
                result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
        if (TEST_BIT(50000baseCR2_Full))
                result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
        if (TEST_BIT(Pause))
                result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
        if (TEST_BIT(Asym_Pause))
                result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
        if (TEST_BIT(Autoneg))
                result |= (1 << MC_CMD_PHY_CAP_AN_LBN);

#undef TEST_BIT

        return result;
}

static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
{
        struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
        enum efx_phy_mode mode, supported;
        u32 flags;

        /* TODO: Advertise the capabilities supported by this PHY */
        supported = 0;
        if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
                supported |= PHY_MODE_TX_DISABLED;
        if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
                supported |= PHY_MODE_LOW_POWER;
        if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
                supported |= PHY_MODE_OFF;

        mode = efx->phy_mode & supported;

        flags = 0;
        if (mode & PHY_MODE_TX_DISABLED)
                flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
        if (mode & PHY_MODE_LOW_POWER)
                flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
        if (mode & PHY_MODE_OFF)
                flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);

        return flags;
}

static u8 mcdi_to_ethtool_media(u32 media)
{
        switch (media) {
        case MC_CMD_MEDIA_XAUI:
        case MC_CMD_MEDIA_CX4:
        case MC_CMD_MEDIA_KX4:
                return PORT_OTHER;

        case MC_CMD_MEDIA_XFP:
        case MC_CMD_MEDIA_SFP_PLUS:
        case MC_CMD_MEDIA_QSFP_PLUS:
                return PORT_FIBRE;

        case MC_CMD_MEDIA_BASE_T:
                return PORT_TP;

        default:
                return PORT_OTHER;
        }
}

static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
                                     struct efx_link_state *link_state,
                                     u32 speed, u32 flags, u32 fcntl)
{
        switch (fcntl) {
        case MC_CMD_FCNTL_AUTO:
                WARN_ON(1);     /* This is not a link mode */
                link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
                break;
        case MC_CMD_FCNTL_BIDIR:
                link_state->fc = EFX_FC_TX | EFX_FC_RX;
                break;
        case MC_CMD_FCNTL_RESPOND:
                link_state->fc = EFX_FC_RX;
                break;
        default:
                WARN_ON(1);
                /* Fall through */
        case MC_CMD_FCNTL_OFF:
                link_state->fc = 0;
                break;
        }

        link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
        link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
        link_state->speed = speed;
}

/* The semantics of the ethtool FEC mode bitmask are not well defined,
 * particularly the meaning of combinations of bits.  Which means we get to
 * define our own semantics, as follows:
 * OFF overrides any other bits, and means "disable all FEC" (with the
 * exception of 25G KR4/CR4, where it is not possible to reject it if AN
 * partner requests it).
 * AUTO on its own means use cable requirements and link partner autoneg with
 * fw-default preferences for the cable type.
 * AUTO and either RS or BASER means use the specified FEC type if cable and
 * link partner support it, otherwise autoneg/fw-default.
 * RS or BASER alone means use the specified FEC type if cable and link partner
 * support it and either requests it, otherwise no FEC.
 * Both RS and BASER (whether AUTO or not) means use FEC if cable and link
 * partner support it, preferring RS to BASER.
 */
static u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
{
        u32 ret = 0;

        if (ethtool_cap & ETHTOOL_FEC_OFF)
                return 0;

        if (ethtool_cap & ETHTOOL_FEC_AUTO)
                ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
                       (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
                       (1 << MC_CMD_PHY_CAP_RS_FEC_LBN);
        if (ethtool_cap & ETHTOOL_FEC_RS)
                ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
                       (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
        if (ethtool_cap & ETHTOOL_FEC_BASER)
                ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
                       (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
                       (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) |
                       (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
        return ret;
}

/* Invert ethtool_fec_caps_to_mcdi.  There are two combinations that function
 * can never produce, (baser xor rs) and neither req; the implementation below
 * maps both of those to AUTO.  This should never matter, and it's not clear
 * what a better mapping would be anyway.
 */
static u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
{
        bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN),
             rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN),
             baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)
                            : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN),
             baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)
                                : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);

        if (!baser && !rs)
                return ETHTOOL_FEC_OFF;
        return (rs_req ? ETHTOOL_FEC_RS : 0) |
               (baser_req ? ETHTOOL_FEC_BASER : 0) |
               (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
}

static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
        struct efx_mcdi_phy_data *phy_data;
@@ -527,58 +188,6 @@ int efx_mcdi_port_reconfigure(struct efx_nic *efx)
                                 efx->loopback_mode, 0);
}

/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
 * supported by the link partner. Warn the user if this isn't the case
 */
static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
{
        struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
        u32 rmtadv;

        /* The link partner capabilities are only relevant if the
         * link supports flow control autonegotiation */
        if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
                return;

        /* If flow control autoneg is supported and enabled, then fine */
        if (efx->wanted_fc & EFX_FC_AUTO)
                return;

        rmtadv = 0;
        if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
                rmtadv |= ADVERTISED_Pause;
        if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
                rmtadv |= ADVERTISED_Asym_Pause;

        if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
                netif_err(efx, link, efx->net_dev,
                          "warning: link partner doesn't support pause frames");
}

static bool efx_mcdi_phy_poll(struct efx_nic *efx)
{
        struct efx_link_state old_state = efx->link_state;
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
        int rc;

        WARN_ON(!mutex_is_locked(&efx->mac_lock));

        BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
                          outbuf, sizeof(outbuf), NULL);
        if (rc)
                efx->link_state.up = false;
        else
                efx_mcdi_phy_decode_link(
                        efx, &efx->link_state,
                        MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
                        MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
                        MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));

        return !efx_link_state_equal(&efx->link_state, &old_state);
}

static void efx_mcdi_phy_remove(struct efx_nic *efx)
{
        struct efx_mcdi_phy_data *phy_data = efx->phy_data;
@@ -666,58 +275,6 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
        return 0;
}

static int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
                                     struct ethtool_fecparam *fec)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
        u32 caps, active, speed; /* MCDI format */
        bool is_25g = false;
        size_t outlen;
        int rc;

        BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
        rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN)
                return -EOPNOTSUPP;

        /* behaviour for 25G/50G links depends on 25G BASER bit */
        speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED);
        is_25g = speed == 25000 || speed == 50000;

        caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP);
        fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g);
        /* BASER is never supported on 100G */
        if (speed == 100000)
                fec->fec &= ~ETHTOOL_FEC_BASER;

        active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE);
        switch (active) {
        case MC_CMD_FEC_NONE:
                fec->active_fec = ETHTOOL_FEC_OFF;
                break;
        case MC_CMD_FEC_BASER:
                fec->active_fec = ETHTOOL_FEC_BASER;
                break;
        case MC_CMD_FEC_RS:
                fec->active_fec = ETHTOOL_FEC_RS;
                break;
        default:
                netif_warn(efx, hw, efx->net_dev,
                           "Firmware reports unrecognised FEC_TYPE %u\n",
                           active);
                /* We don't know what firmware has picked.  AUTO is as good a
                 * "can't happen" value as any other.
                 */
                fec->active_fec = ETHTOOL_FEC_AUTO;
                break;
        }

        return 0;
}

static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx,
                                     const struct ethtool_fecparam *fec)
{
@@ -745,27 +302,6 @@ static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx,
        return 0;
}

static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
        size_t outlen;
        int rc;

        BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;

        if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
                return -EIO;
        if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
                return -EINVAL;

        return 0;
}

static const char *const mcdi_sft9001_cable_diag_names[] = {
        "cable.pairA.length",
        "cable.pairB.length",
@@ -1139,84 +675,6 @@ u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
        return phy_data->supported_cap;
}

static unsigned int efx_mcdi_event_link_speed[] = {
        [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
        [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
        [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
        [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
        [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
        [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
        [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
};

void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
{
        u32 flags, fcntl, speed, lpa;

        speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
        EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
        speed = efx_mcdi_event_link_speed[speed];

        flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
        fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
        lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);

        /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
         * which is only run after flushing the event queues. Therefore, it
         * is safe to modify the link state outside of the mac_lock here.
         */
        efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);

        efx_mcdi_phy_check_fcntl(efx, lpa);

        efx_link_status_changed(efx);
}

int efx_mcdi_set_mac(struct efx_nic *efx)
{
        u32 fcntl;
        MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);

        BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);

        /* This has no effect on EF10 */
        ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
                        efx->net_dev->dev_addr);

        MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
                       EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
        MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);

        /* Set simple MAC filter for Siena */
        MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
                              SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);

        MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
                              SET_MAC_IN_FLAG_INCLUDE_FCS,
                              !!(efx->net_dev->features & NETIF_F_RXFCS));

        switch (efx->wanted_fc) {
        case EFX_FC_RX | EFX_FC_TX:
                fcntl = MC_CMD_FCNTL_BIDIR;
                break;
        case EFX_FC_RX:
                fcntl = MC_CMD_FCNTL_RESPOND;
                break;
        default:
                fcntl = MC_CMD_FCNTL_OFF;
                break;
        }
        if (efx->wanted_fc & EFX_FC_AUTO)
                fcntl = MC_CMD_FCNTL_AUTO;
        if (efx->fc_disable)
                fcntl = MC_CMD_FCNTL_OFF;

        MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);

        return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
                            NULL, 0, NULL);
}

bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
@@ -1348,17 +806,3 @@ void efx_mcdi_port_remove(struct efx_nic *efx)
        efx->phy_op->remove(efx);
        efx_nic_free_buffer(efx, &efx->stats_buffer);
}

/* Get physical port number (EF10 only; on Siena it is same as PF number) */
int efx_mcdi_port_get_number(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
                          outbuf, sizeof(outbuf), NULL);
        if (rc)
                return rc;

        return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
}
drivers/net/ethernet/sfc/mcdi_port_common.c (new file, 568 lines)
@@ -0,0 +1,568 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "mcdi_port_common.h"
#include "efx_common.h"

int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
        size_t outlen;
        int rc;

        BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
        BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                goto fail;

        if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
                rc = -EIO;
                goto fail;
        }

        cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
        cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
        cfg->supported_cap =
                MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
        cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
        cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
        cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
        memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
               sizeof(cfg->name));
        cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
        cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
        memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
               sizeof(cfg->revision));

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}

void efx_link_set_advertising(struct efx_nic *efx,
                              const unsigned long *advertising)
{
        memcpy(efx->link_advertising, advertising,
               sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));

        efx->link_advertising[0] |= ADVERTISED_Autoneg;
        if (advertising[0] & ADVERTISED_Pause)
                efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
        else
                efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
        if (advertising[0] & ADVERTISED_Asym_Pause)
                efx->wanted_fc ^= EFX_FC_TX;
}

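/* Worked example of the Pause/Asym_Pause decode above: starting from zero,
 * Pause sets both directions and Asym_Pause then flips TX, which yields the
 * standard IEEE 802.3 mapping (Pause alone: TX|RX; Pause plus Asym: RX only;
 * Asym alone: TX only).  This restatement is an illustrative sketch only.
 */
static u8 example_decode_pause_bits(bool pause, bool asym_pause)
{
        u8 fc = 0;

        if (pause)
                fc |= EFX_FC_TX | EFX_FC_RX;    /* symmetric pause */
        if (asym_pause)
                fc ^= EFX_FC_TX;                /* asym flips the TX half */
        return fc;      /* e.g. pause=1, asym=1 gives RX-only */
}
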
int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
                      u32 flags, u32 loopback_mode, u32 loopback_speed)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
        int rc;

        BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);

        MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
        MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
        MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
        MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);

        rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
                          NULL, 0, NULL);
        return rc;
}

int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
        size_t outlen;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                goto fail;

        if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
                      MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
                rc = -EIO;
                goto fail;
        }

        *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
}

void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
{
#define SET_BIT(name)   __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
                                  linkset)

        bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
        switch (media) {
        case MC_CMD_MEDIA_KX4:
                SET_BIT(Backplane);
                if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
                        SET_BIT(1000baseKX_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
                        SET_BIT(10000baseKX4_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
                        SET_BIT(40000baseKR4_Full);
                break;

        case MC_CMD_MEDIA_XFP:
        case MC_CMD_MEDIA_SFP_PLUS:
        case MC_CMD_MEDIA_QSFP_PLUS:
                SET_BIT(FIBRE);
                if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
                        SET_BIT(1000baseT_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
                        SET_BIT(10000baseT_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
                        SET_BIT(40000baseCR4_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
                        SET_BIT(100000baseCR4_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
                        SET_BIT(25000baseCR_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
                        SET_BIT(50000baseCR2_Full);
                break;

        case MC_CMD_MEDIA_BASE_T:
                SET_BIT(TP);
                if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
                        SET_BIT(10baseT_Half);
                if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
                        SET_BIT(10baseT_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
                        SET_BIT(100baseT_Half);
                if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
                        SET_BIT(100baseT_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
                        SET_BIT(1000baseT_Half);
                if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
                        SET_BIT(1000baseT_Full);
                if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
                        SET_BIT(10000baseT_Full);
                break;
        }

        if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
                SET_BIT(Pause);
        if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
                SET_BIT(Asym_Pause);
        if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
                SET_BIT(Autoneg);

#undef SET_BIT
}

u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
{
        u32 result = 0;

#define TEST_BIT(name)  test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
                                 linkset)

        if (TEST_BIT(10baseT_Half))
                result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
        if (TEST_BIT(10baseT_Full))
                result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
        if (TEST_BIT(100baseT_Half))
                result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
        if (TEST_BIT(100baseT_Full))
                result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
        if (TEST_BIT(1000baseT_Half))
                result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
        if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
                result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
        if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
                result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
        if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
                result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
        if (TEST_BIT(100000baseCR4_Full))
                result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
        if (TEST_BIT(25000baseCR_Full))
                result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
        if (TEST_BIT(50000baseCR2_Full))
                result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
        if (TEST_BIT(Pause))
                result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
        if (TEST_BIT(Asym_Pause))
                result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
        if (TEST_BIT(Autoneg))
                result |= (1 << MC_CMD_PHY_CAP_AN_LBN);

#undef TEST_BIT

        return result;
}

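/* A small usage sketch of the two translators above: an ethtool handler can
 * convert the user's advertised link-mode mask to MCDI capability bits, push
 * them with efx_mcdi_set_link(), and decode the same bits back for reporting.
 * The flow below is an illustrative assumption, not the driver's handler.
 */
static int example_set_advertising(struct efx_nic *efx,
                                   const unsigned long *linkset, u32 media)
{
        __ETHTOOL_DECLARE_LINK_MODE_MASK(readback);
        u32 cap = ethtool_linkset_to_mcdi_cap(linkset);
        int rc;

        rc = efx_mcdi_set_link(efx, cap, efx_get_mcdi_phy_flags(efx),
                               efx->loopback_mode, 0);
        if (rc)
                return rc;
        /* decode what was just advertised, e.g. for a ksettings query */
        mcdi_to_ethtool_linkset(media, cap, readback);
        return 0;
}
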
u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
{
        struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
        enum efx_phy_mode mode, supported;
        u32 flags;

        /* TODO: Advertise the capabilities supported by this PHY */
        supported = 0;
        if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
                supported |= PHY_MODE_TX_DISABLED;
        if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
                supported |= PHY_MODE_LOW_POWER;
        if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
                supported |= PHY_MODE_OFF;

        mode = efx->phy_mode & supported;

        flags = 0;
        if (mode & PHY_MODE_TX_DISABLED)
                flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
        if (mode & PHY_MODE_LOW_POWER)
                flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
        if (mode & PHY_MODE_OFF)
                flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);

        return flags;
}

u8 mcdi_to_ethtool_media(u32 media)
{
        switch (media) {
        case MC_CMD_MEDIA_XAUI:
        case MC_CMD_MEDIA_CX4:
        case MC_CMD_MEDIA_KX4:
                return PORT_OTHER;

        case MC_CMD_MEDIA_XFP:
        case MC_CMD_MEDIA_SFP_PLUS:
        case MC_CMD_MEDIA_QSFP_PLUS:
                return PORT_FIBRE;

        case MC_CMD_MEDIA_BASE_T:
                return PORT_TP;

        default:
                return PORT_OTHER;
        }
}

void efx_mcdi_phy_decode_link(struct efx_nic *efx,
                              struct efx_link_state *link_state,
                              u32 speed, u32 flags, u32 fcntl)
{
        switch (fcntl) {
        case MC_CMD_FCNTL_AUTO:
                WARN_ON(1);     /* This is not a link mode */
                link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
                break;
        case MC_CMD_FCNTL_BIDIR:
                link_state->fc = EFX_FC_TX | EFX_FC_RX;
                break;
        case MC_CMD_FCNTL_RESPOND:
                link_state->fc = EFX_FC_RX;
                break;
        default:
                WARN_ON(1);
                /* Fall through */
        case MC_CMD_FCNTL_OFF:
                link_state->fc = 0;
                break;
        }

        link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
        link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
        link_state->speed = speed;
}

/* The semantics of the ethtool FEC mode bitmask are not well defined,
 * particularly the meaning of combinations of bits.  Which means we get to
 * define our own semantics, as follows:
 * OFF overrides any other bits, and means "disable all FEC" (with the
 * exception of 25G KR4/CR4, where it is not possible to reject it if AN
 * partner requests it).
 * AUTO on its own means use cable requirements and link partner autoneg with
 * fw-default preferences for the cable type.
 * AUTO and either RS or BASER means use the specified FEC type if cable and
 * link partner support it, otherwise autoneg/fw-default.
 * RS or BASER alone means use the specified FEC type if cable and link partner
 * support it and either requests it, otherwise no FEC.
 * Both RS and BASER (whether AUTO or not) means use FEC if cable and link
 * partner support it, preferring RS to BASER.
 */
u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
{
        u32 ret = 0;

        if (ethtool_cap & ETHTOOL_FEC_OFF)
                return 0;

        if (ethtool_cap & ETHTOOL_FEC_AUTO)
                ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
                       (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
                       (1 << MC_CMD_PHY_CAP_RS_FEC_LBN);
        if (ethtool_cap & ETHTOOL_FEC_RS)
                ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
                       (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
        if (ethtool_cap & ETHTOOL_FEC_BASER)
                ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
                       (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
                       (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) |
                       (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
        return ret;
}

/* Invert ethtool_fec_caps_to_mcdi.  There are two combinations that function
 * can never produce, (baser xor rs) and neither req; the implementation below
 * maps both of those to AUTO.  This should never matter, and it's not clear
 * what a better mapping would be anyway.
 */
u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
{
        bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN),
             rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN),
             baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)
                            : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN),
             baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)
                                : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);

        if (!baser && !rs)
                return ETHTOOL_FEC_OFF;
        return (rs_req ? ETHTOOL_FEC_RS : 0) |
               (baser_req ? ETHTOOL_FEC_BASER : 0) |
               (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
}

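/* Worked example of the FEC mapping pair above: ETHTOOL_FEC_RS alone sets
 * both the RS capability and the RS "requested" bit, so firmware uses RS only
 * when the cable and link partner allow it, and the inverse mapping recovers
 * the original ethtool value.  This self-check is an illustrative sketch.
 */
static void example_fec_round_trip(void)
{
        u32 mcdi = ethtool_fec_caps_to_mcdi(ETHTOOL_FEC_RS);

        /* RS capability and request are both present... */
        WARN_ON(!(mcdi & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN)));
        WARN_ON(!(mcdi & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN)));
        /* ...and the inverse mapping recovers ETHTOOL_FEC_RS on a 10G link */
        WARN_ON(mcdi_fec_caps_to_ethtool(mcdi, false) != ETHTOOL_FEC_RS);
}
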
/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
 * supported by the link partner. Warn the user if this isn't the case
 */
void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
{
        struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
        u32 rmtadv;

        /* The link partner capabilities are only relevant if the
         * link supports flow control autonegotiation
         */
        if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
                return;

        /* If flow control autoneg is supported and enabled, then fine */
        if (efx->wanted_fc & EFX_FC_AUTO)
                return;

        rmtadv = 0;
        if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
                rmtadv |= ADVERTISED_Pause;
        if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
                rmtadv |= ADVERTISED_Asym_Pause;

        if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
                netif_err(efx, link, efx->net_dev,
                          "warning: link partner doesn't support pause frames");
}

bool efx_mcdi_phy_poll(struct efx_nic *efx)
{
        struct efx_link_state old_state = efx->link_state;
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
        int rc;

        WARN_ON(!mutex_is_locked(&efx->mac_lock));

        BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
                          outbuf, sizeof(outbuf), NULL);
        if (rc)
                efx->link_state.up = false;
        else
                efx_mcdi_phy_decode_link(
                        efx, &efx->link_state,
                        MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
                        MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
                        MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));

        return !efx_link_state_equal(&efx->link_state, &old_state);
}

int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
        u32 caps, active, speed; /* MCDI format */
        bool is_25g = false;
        size_t outlen;
        int rc;

        BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
        rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN)
                return -EOPNOTSUPP;

        /* behaviour for 25G/50G links depends on 25G BASER bit */
        speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED);
        is_25g = speed == 25000 || speed == 50000;

        caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP);
        fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g);
        /* BASER is never supported on 100G */
        if (speed == 100000)
                fec->fec &= ~ETHTOOL_FEC_BASER;

        active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE);
        switch (active) {
        case MC_CMD_FEC_NONE:
                fec->active_fec = ETHTOOL_FEC_OFF;
                break;
        case MC_CMD_FEC_BASER:
                fec->active_fec = ETHTOOL_FEC_BASER;
                break;
        case MC_CMD_FEC_RS:
                fec->active_fec = ETHTOOL_FEC_RS;
                break;
        default:
                netif_warn(efx, hw, efx->net_dev,
                           "Firmware reports unrecognised FEC_TYPE %u\n",
                           active);
                /* We don't know what firmware has picked.  AUTO is as good a
                 * "can't happen" value as any other.
                 */
                fec->active_fec = ETHTOOL_FEC_AUTO;
                break;
        }

        return 0;
}

int efx_mcdi_phy_test_alive(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
        size_t outlen;
        int rc;

        BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;

        if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
                return -EIO;
        if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
                return -EINVAL;

        return 0;
}

int efx_mcdi_set_mac(struct efx_nic *efx)
{
        u32 fcntl;
        MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);

        BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);

        /* This has no effect on EF10 */
        ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
                        efx->net_dev->dev_addr);

        MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
                       EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
        MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);

        /* Set simple MAC filter for Siena */
        MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
                              SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);

        MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
                              SET_MAC_IN_FLAG_INCLUDE_FCS,
                              !!(efx->net_dev->features & NETIF_F_RXFCS));

        switch (efx->wanted_fc) {
        case EFX_FC_RX | EFX_FC_TX:
                fcntl = MC_CMD_FCNTL_BIDIR;
                break;
        case EFX_FC_RX:
                fcntl = MC_CMD_FCNTL_RESPOND;
                break;
        default:
                fcntl = MC_CMD_FCNTL_OFF;
                break;
        }
        if (efx->wanted_fc & EFX_FC_AUTO)
                fcntl = MC_CMD_FCNTL_AUTO;
        if (efx->fc_disable)
                fcntl = MC_CMD_FCNTL_OFF;

        MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);

        return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
                            NULL, 0, NULL);
}

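/* Worked example of the wanted_fc to MCDI fcntl mapping above: fc_disable
 * overrides everything, EFX_FC_AUTO overrides the switch result, RX|TX maps
 * to BIDIR, RX alone to RESPOND, and anything else (including TX alone) to
 * OFF.  A hedged restatement of the same decision order, for clarity:
 */
static u32 example_wanted_fc_to_fcntl(u8 wanted_fc, bool fc_disable)
{
        if (fc_disable)
                return MC_CMD_FCNTL_OFF;        /* forced off wins */
        if (wanted_fc & EFX_FC_AUTO)
                return MC_CMD_FCNTL_AUTO;       /* autonegotiate the rest */
        if ((wanted_fc & (EFX_FC_RX | EFX_FC_TX)) == (EFX_FC_RX | EFX_FC_TX))
                return MC_CMD_FCNTL_BIDIR;
        return (wanted_fc & EFX_FC_RX) ? MC_CMD_FCNTL_RESPOND :
                                         MC_CMD_FCNTL_OFF;
}
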
/* Get physical port number (EF10 only; on Siena it is same as PF number) */
|
||||
int efx_mcdi_port_get_number(struct efx_nic *efx)
|
||||
{
|
||||
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
|
||||
int rc;
|
||||
|
||||
rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
|
||||
outbuf, sizeof(outbuf), NULL);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
|
||||
}
|
||||
|
||||
static unsigned int efx_mcdi_event_link_speed[] = {
|
||||
[MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
|
||||
[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
|
||||
[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
|
||||
[MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
|
||||
[MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
|
||||
[MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
|
||||
[MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
|
||||
};
|
||||
|
||||
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
{
	u32 flags, fcntl, speed, lpa;

	speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
	EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
	speed = efx_mcdi_event_link_speed[speed];

	flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
	fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
	lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);

	/* efx->link_state is only modified by efx_mcdi_phy_get_link(),
	 * which is only run after flushing the event queues. Therefore, it
	 * is safe to modify the link state outside of the mac_lock here.
	 */
	efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);

	efx_mcdi_phy_check_fcntl(efx, lpa);

	efx_link_status_changed(efx);
}
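
[Editor's note] For reference, a sketch of how the table above turns a raw LINKCHANGE speed code into Mb/s, mirroring the bounds check done with EFX_WARN_ON_PARANOID(). The helper is hypothetical and shown for illustration only.

static unsigned int sfc_example_link_speed_mbps(u32 code)
{
	/* Out-of-range codes map to 0 rather than reading past the table */
	if (code >= ARRAY_SIZE(efx_mcdi_event_link_speed))
		return 0;
	return efx_mcdi_event_link_speed[code]; /* e.g. _SPEED_25G -> 25000 */
}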

drivers/net/ethernet/sfc/mcdi_port_common.h (new file, 57 lines)
@@ -0,0 +1,57 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#ifndef EFX_MCDI_PORT_COMMON_H
#define EFX_MCDI_PORT_COMMON_H

#include "net_driver.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

struct efx_mcdi_phy_data {
	u32 flags;
	u32 type;
	u32 supported_cap;
	u32 channel;
	u32 port;
	u32 stats_mask;
	u8 name[20];
	u32 media;
	u32 mmd_mask;
	u8 revision[20];
	u32 forced_cap;
};

#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))

int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg);
void efx_link_set_advertising(struct efx_nic *efx,
			      const unsigned long *advertising);
int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
		      u32 flags, u32 loopback_mode, u32 loopback_speed);
int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes);
void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset);
u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset);
u32 efx_get_mcdi_phy_flags(struct efx_nic *efx);
u8 mcdi_to_ethtool_media(u32 media);
void efx_mcdi_phy_decode_link(struct efx_nic *efx,
			      struct efx_link_state *link_state,
			      u32 speed, u32 flags, u32 fcntl);
u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap);
u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g);
void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
bool efx_mcdi_phy_poll(struct efx_nic *efx);
int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
			      struct ethtool_fecparam *fec);
int efx_mcdi_phy_test_alive(struct efx_nic *efx);
int efx_mcdi_set_mac(struct efx_nic *efx);
int efx_mcdi_port_get_number(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);

#endif

@@ -24,7 +24,6 @@
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
-#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
#include <net/busy_poll.h>
#include <net/xdp.h>

@@ -139,6 +138,8 @@ struct efx_special_buffer {
 *	freed when descriptor completes
 * @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data
 *	member is the associated buffer to drop a page reference on.
 * @option: When @flags & %EFX_TX_BUF_OPTION, an EF10-specific option
 *	descriptor.
 * @dma_addr: DMA address of the fragment.
 * @flags: Flags for allocation and DMA mapping type
 * @len: Length of this fragment.

@@ -153,7 +154,7 @@ struct efx_tx_buffer {
		struct xdp_frame *xdpf;
	};
	union {
-		efx_qword_t option;
+		efx_qword_t option; /* EF10 */
		dma_addr_t dma_addr;
	};
	unsigned short flags;

@@ -743,13 +744,13 @@ union efx_multicast_hash {
struct vfdi_status;

/* The reserved RSS context value */
-#define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
+#define EFX_MCDI_RSS_CONTEXT_INVALID	0xffffffff
/**
 * struct efx_rss_context - A user-defined RSS context for filtering
 * @list: node of linked list on which this struct is stored
 * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
- *	%EFX_EF10_RSS_CONTEXT_INVALID if this context is not present on the NIC.
- *	For Siena, 0 if RSS is active, else %EFX_EF10_RSS_CONTEXT_INVALID.
+ *	%EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
+ *	For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
 * @user_id: the rss_context ID exposed to userspace over ethtool.
 * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
 * @rx_hash_key: Toeplitz hash key for this RSS context

@@ -1611,6 +1612,15 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
	return &rx_queue->buffer[index];
}

static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

/**
 * EFX_MAX_FRAME_LEN - calculate maximum frame length
 *
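
[Editor's note] efx_rx_buf_next() above treats the buffer array as a ring: ptr_mask is ring_size - 1 for a power-of-two ring, so the buffer at slot ptr_mask wraps back to slot 0. A sketch of walking an n_frags-long packet with it, illustration only; first_buf and n_frags come from the completion event in the real RX path.

	struct efx_rx_buffer *buf = first_buf;	/* hypothetical starting point */
	unsigned int i;

	for (i = 0; i < n_frags; i++) {
		/* ... process buf->page, buf->page_offset, buf->len ... */
		buf = efx_rx_buf_next(rx_queue, buf);	/* wraps at ptr_mask */
	}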

@@ -9,9 +9,9 @@
#define EFX_NIC_H

#include <linux/net_tstamp.h>
-#include <linux/i2c-algo-bit.h>
#include "net_driver.h"
#include "efx.h"
+#include "efx_common.h"
#include "mcdi.h"

enum {

@@ -506,6 +506,9 @@ static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
	tx_queue->efx->type->tx_write(tx_queue);
}

int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			bool *data_mapped);

/* RX data path */
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{

@@ -554,6 +557,7 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	channel->efx->type->ev_read_ack(channel);
}

void efx_nic_event_test_start(struct efx_channel *channel);

/* Falcon/Siena queue operations */

@@ -671,6 +675,7 @@ struct efx_farch_register_test {
	unsigned address;
	efx_oword_t mask;
};

int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs);

@@ -21,6 +21,7 @@
#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
+#include "rx_common.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"

@@ -32,60 +33,13 @@
/* Maximum rx prefix used by any architecture. */
#define EFX_MAX_RX_PREFIX_SIZE 16

/* Number of RX buffers to recycle pages for. When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)

@@ -94,301 +48,6 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
			   DMA_FROM_DEVICE);
}

void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
				      XDP_PACKET_HEADROOM,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/**
 * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
					   XDP_PACKET_HEADROOM;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align +
					      XDP_PACKET_HEADROOM;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

/* Unmap a DMA-mapped page. This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
				struct efx_rx_buffer *rx_buf,
				unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practise,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)

@@ -412,53 +71,6 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO. GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,

@@ -805,174 +417,6 @@ out:
	channel->rx_pkt_n_frags = 0;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

static void efx_init_rx_recycle_ring(struct efx_nic *efx,
				     struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;
	int rc = 0;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Initialise XDP queue information */
	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
			      rx_queue->core_index);

	if (rc) {
		netif_err(efx, rx_err, efx->net_dev,
			  "Failure to initialise XDP queue information rc=%d\n",
			  rc);
		efx->xdp_rxq_info_failed = true;
	} else {
		rx_queue->xdp_rxq_info_valid = true;
	}

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;

	if (rx_queue->xdp_rxq_info_valid)
		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);

	rx_queue->xdp_rxq_info_valid = false;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

static void efx_filter_rfs_work(struct work_struct *data)

@@ -1206,37 +650,3 @@ bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
}

#endif /* CONFIG_RFS_ACCEL */

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range. Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}

drivers/net/ethernet/sfc/rx_common.c (new file, 851 lines)
@@ -0,0 +1,851 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

/* Number of RX buffers to recycle pages for. When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	unsigned int index;
	struct page *page;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	struct page *page = rx_buf->page;
	unsigned int index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned int read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

/* Recycle the pages that are used by buffers that have just been received. */
void efx_recycle_rx_pages(struct efx_channel *channel,
			  struct efx_rx_buffer *rx_buf,
			  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

void efx_discard_rx_packet(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct efx_nic *efx = rx_queue->efx;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}
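
[Editor's note] Worked sizing example for the recycle ring above; the numbers are assumed (4KiB pages, two RX buffers per page as with a 1500-byte MTU), not taken from the driver:

	unsigned int bufs = EFX_RECYCLE_RING_SIZE_IOMMU;	/* 4096 */
	unsigned int ring = roundup_pow_of_two(bufs / 2);	/* 2048 pages */
	unsigned int mask = ring - 1;				/* page_ptr_mask = 2047 */
	unsigned int slot = rx_queue->page_add & mask;		/* wraps without a modulo */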

static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	int i;

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, max_trigger;
	struct efx_nic *efx = rx_queue->efx;
	int rc = 0;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Initialise XDP queue information */
	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
			      rx_queue->core_index);

	if (rc) {
		netif_err(efx, rx_err, efx->net_dev,
			  "Failure to initialise XDP queue information rc=%d\n",
			  rc);
		efx->xdp_rxq_info_failed = true;
	} else {
		rx_queue->xdp_rxq_info_valid = true;
	}

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_rx_buffer *rx_buf;
	int i;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned int index = i & rx_queue->ptr_mask;

			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	efx_fini_rx_recycle_ring(rx_queue);

	if (rx_queue->xdp_rxq_info_valid)
		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);

	rx_queue->xdp_rxq_info_valid = false;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

/* Unmap a DMA-mapped page. This function is only called for the final RX
 * buffer in a page.
 */
void efx_unmap_rx_buffer(struct efx_nic *efx,
			 struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
			 struct efx_rx_buffer *rx_buf,
			 unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

void efx_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}

/* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	unsigned int page_offset, index, count;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	struct efx_rx_buffer *rx_buf;
	dma_addr_t dma_addr;
	struct page *page;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
					   XDP_PACKET_HEADROOM;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align +
					      XDP_PACKET_HEADROOM;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
				      XDP_PACKET_HEADROOM,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}
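
[Editor's note] A rough worked example of the page-split arithmetic above; every number here is assumed for illustration, not taken from the driver:

	/* Assumed: 4KiB pages, rx_buffer_order == 0, and a step of about
	 * 1.9KiB after ALIGN(rx_dma_len + rx_ip_align + XDP_PACKET_HEADROOM,
	 * EFX_RX_BUF_ALIGNMENT). Then:
	 *   rx_bufs_per_page   = (4096 - sizeof(struct efx_rx_page_state)) / step = 2
	 *   rx_buffer_truesize = 4096 / 2                                         = 2048
	 *   rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, 2)          = 4
	 * i.e. one refill batch of eight buffers costs four pages in this layout.
	 */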

/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

/* Pass a received packet up through GRO. GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}

/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
 */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx, *new;
	u32 id = 1; /* Don't use zero, that refers to the master RSS context */

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	/* Search for first gap in the numbering */
	list_for_each_entry(ctx, head, list) {
		if (ctx->user_id != id)
			break;
		id++;
		/* Check for wrap. If this happens, we have nearly 2^32
		 * allocated RSS contexts, which seems unlikely.
		 */
		if (WARN_ON_ONCE(!id))
			return NULL;
	}

	/* Create the new entry */
	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;
	new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
	new->rx_hash_udp_4tuple = false;

	/* Insert the new entry into the gap */
	new->user_id = id;
	list_add_tail(&new->list, &ctx->list);
	return new;
}
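
[Editor's note] Illustration of the first-gap search above: with existing user_ids {1, 2, 4}, the walk increments id to 3, stops at the entry whose user_id is 4 (since 4 != 3), and list_add_tail() inserts the new context immediately before that entry -- so the list stays sorted and ID 3 is reused.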

struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx;

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	list_for_each_entry(ctx, head, list)
		if (ctx->user_id == id)
			return ctx;
	return NULL;
}

void efx_free_rss_context_entry(struct efx_rss_context *ctx)
{
	list_del(&ctx->list);
	kfree(ctx);
}

void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
		ctx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range. Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}

bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->outer_vid, &right->outer_vid,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
}

u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
	return jhash2((const u32 *)&spec->outer_vid,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
		      0);
}
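
[Editor's note] efx_filter_spec_equal() and efx_filter_spec_hash() deliberately key on the same byte range -- everything from outer_vid to the end of the spec -- so two specs hash equal iff they compare equal, and the BUILD_BUG_ON() guarantees the key is 4-byte aligned for jhash2(). A self-contained sketch of the same pattern on a made-up struct (illustration only; assumes <linux/jhash.h> and <linux/stddef.h>):

struct example_key {
	u32 cookie;		/* not part of the lookup key */
	u32 first_match_field;	/* key starts here, 4-byte aligned */
	u8  rest[20];		/* remainder of the key, total divisible by 4 */
};

static u32 example_key_hash(const struct example_key *k)
{
	BUILD_BUG_ON(offsetof(struct example_key, first_match_field) & 3);
	return jhash2((const u32 *)&k->first_match_field,
		      (sizeof(*k) -
		       offsetof(struct example_key, first_match_field)) / 4,
		      0);
}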

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force)
{
	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
		/* ARFS is currently updating this entry, leave it */
		return false;
	}
	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
		/* ARFS tried and failed to update this, so it's probably out
		 * of date. Remove the filter and the ARFS rule entry.
		 */
		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
		*force = true;
		return true;
	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
		/* ARFS has moved on, so old filter is not needed. Since we did
		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
		 * not be removed by efx_rps_hash_del() subsequently.
		 */
		*force = true;
		return true;
	}
	/* Remove it iff ARFS wants to. */
	return true;
}

static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
				       const struct efx_filter_spec *spec)
{
	u32 hash = efx_filter_spec_hash(spec);

	lockdep_assert_held(&efx->rps_hash_lock);
	if (!efx->rps_hash_table)
		return NULL;
	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec))
			return rule;
	}
	return NULL;
}

struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			*new = false;
			return rule;
		}
	}
	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
	*new = true;
	if (rule) {
		memcpy(&rule->spec, spec, sizeof(rule->spec));
		hlist_add_head(&rule->node, head);
	}
	return rule;
}

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (WARN_ON(!head))
		return;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			/* Someone already reused the entry. We know that if
			 * this check doesn't fire (i.e. filter_id == REMOVING)
			 * then the REMOVING mark was put there by our caller,
			 * because caller is holding a lock on filter table and
			 * only holders of that lock set REMOVING.
			 */
			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
				return;
			hlist_del(node);
			kfree(rule);
			return;
		}
	}
	/* We didn't find it. */
	WARN_ON(1);
}
#endif
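
[Editor's note] A minimal sketch of how the ARFS helpers above compose, illustration only: the caller is hypothetical, and rps_hash_lock is assumed to be the spinlock that the lockdep assertion in efx_rps_hash_bucket() refers to.

	bool new;
	struct efx_arfs_rule *rule;

	/* 'spec' is a populated struct efx_filter_spec describing the flow */
	spin_lock(&efx->rps_hash_lock);
	rule = efx_rps_hash_add(efx, &spec, &new);
	if (rule && new)
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
	spin_unlock(&efx->rps_hash_lock);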
|
||||
|
||||
int efx_probe_filters(struct efx_nic *efx)
|
||||
{
|
||||
int rc;
|
||||
|
||||
init_rwsem(&efx->filter_sem);
|
||||
mutex_lock(&efx->mac_lock);
|
||||
down_write(&efx->filter_sem);
|
||||
rc = efx->type->filter_table_probe(efx);
|
||||
if (rc)
|
||||
goto out_unlock;
|
||||
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
if (efx->type->offload_features & NETIF_F_NTUPLE) {
|
||||
struct efx_channel *channel;
|
||||
int i, success = 1;
|
||||
|
||||
efx_for_each_channel(channel, efx) {
|
||||
channel->rps_flow_id =
|
||||
kcalloc(efx->type->max_rx_ip_filters,
|
||||
sizeof(*channel->rps_flow_id),
|
||||
GFP_KERNEL);
|
||||
if (!channel->rps_flow_id)
|
||||
success = 0;
|
||||
else
|
||||
for (i = 0;
|
||||
i < efx->type->max_rx_ip_filters;
|
||||
++i)
|
||||
channel->rps_flow_id[i] =
|
||||
RPS_FLOW_ID_INVALID;
|
||||
channel->rfs_expire_index = 0;
|
||||
channel->rfs_filter_count = 0;
|
||||
}
|
||||
|
||||
if (!success) {
|
||||
efx_for_each_channel(channel, efx)
|
||||
kfree(channel->rps_flow_id);
|
||||
efx->type->filter_table_remove(efx);
|
||||
rc = -ENOMEM;
|
||||
goto out_unlock;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
out_unlock:
|
||||
up_write(&efx->filter_sem);
|
||||
mutex_unlock(&efx->mac_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
void efx_remove_filters(struct efx_nic *efx)
|
||||
{
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
struct efx_channel *channel;
|
||||
|
||||
efx_for_each_channel(channel, efx) {
|
||||
cancel_delayed_work_sync(&channel->filter_work);
|
||||
kfree(channel->rps_flow_id);
|
||||
}
|
||||
#endif
|
||||
down_write(&efx->filter_sem);
|
||||
efx->type->filter_table_remove(efx);
|
||||
up_write(&efx->filter_sem);
|
||||
}
|
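The helpers above give the common RX code a small open hash table for ARFS rules: buckets are chosen by efx_filter_spec_hash() modulo EFX_ARFS_HASH_TABLE_SIZE, and every access runs under efx->rps_hash_lock. A minimal caller-side sketch of the add-then-mark pattern implied by the comments; the EFX_ARFS_FILTER_ID_PENDING marker and the exact locking call are assumptions for illustration, not taken from this diff:

/* Sketch only: one plausible insertion path using the helpers above.
 * EFX_ARFS_FILTER_ID_PENDING and spin_lock() on rps_hash_lock are
 * assumed details, not shown in this diff.
 */
static int example_arfs_insert(struct efx_nic *efx,
			       const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	bool new;

	spin_lock(&efx->rps_hash_lock);
	rule = efx_rps_hash_add(efx, spec, &new);
	if (!rule) {
		spin_unlock(&efx->rps_hash_lock);
		return -ENOMEM;		/* GFP_ATOMIC allocation failed */
	}
	if (new)			/* first sighting of this flow */
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
	spin_unlock(&efx->rps_hash_lock);
	return 0;
}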
97 drivers/net/ethernet/sfc/rx_common.h Normal file
@@ -0,0 +1,97 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#ifndef EFX_RX_COMMON_H
#define EFX_RX_COMMON_H

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;

	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

void efx_rx_slow_fill(struct timer_list *t);

void efx_recycle_rx_pages(struct efx_channel *channel,
			  struct efx_rx_buffer *rx_buf,
			  unsigned int n_frags);
void efx_discard_rx_packet(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags);

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue);

void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
			struct page *page,
			unsigned int page_offset,
			u16 flags);
void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf);
void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
			 struct efx_rx_buffer *rx_buf,
			 unsigned int num_bufs);

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
void efx_rx_config_page_split(struct efx_nic *efx);
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);

void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh);

struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
void efx_free_rss_context_entry(struct efx_rss_context *ctx);
void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx);

bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right);
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force);
struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec);
struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new);
void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
#endif

int efx_probe_filters(struct efx_nic *efx);
void efx_remove_filters(struct efx_nic *efx);

#endif
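The efx_rx_buf_hash() fallback above assembles the 32-bit RSS hash byte by byte because the field is little-endian and may be unaligned. A standalone sketch (buffer contents invented) showing that the byte assembly matches a direct little-endian load:

/* Standalone sketch of the efx_rx_buf_hash() fallback path.  The buffer
 * bytes below are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t hash_from_bytes(const uint8_t *data)
{
	return (uint32_t)data[0]       |
	       (uint32_t)data[1] << 8  |
	       (uint32_t)data[2] << 16 |
	       (uint32_t)data[3] << 24;
}

int main(void)
{
	const uint8_t eh[4] = { 0x78, 0x56, 0x34, 0x12 };
	uint32_t le;

	memcpy(&le, eh, sizeof(le));	/* what __le32_to_cpup() amounts to on LE hosts */
	printf("byte-assembled: %#x\n", (unsigned)hash_from_bytes(eh));	/* 0x12345678 */
	printf("direct LE load: %#x\n", (unsigned)le);	/* same value on little-endian */
	return 0;
}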
@@ -18,6 +18,8 @@
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
@@ -783,7 +785,7 @@ void efx_selftest_async_cancel(struct efx_nic *efx)
	cancel_delayed_work_sync(&efx->selftest_work);
}

void efx_selftest_async_work(struct work_struct *data)
static void efx_selftest_async_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   selftest_work.work);
@@ -802,3 +804,8 @@ void efx_selftest_async_work(struct work_struct *data)
			  channel->channel, cpu);
	}
}

void efx_selftest_async_init(struct efx_nic *efx)
{
	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
}
@@ -45,8 +45,8 @@ void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
			    int pkt_len);
int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
		 unsigned flags);
void efx_selftest_async_init(struct efx_nic *efx);
void efx_selftest_async_start(struct efx_nic *efx);
void efx_selftest_async_cancel(struct efx_nic *efx);
void efx_selftest_async_work(struct work_struct *data);

#endif /* EFX_SELFTEST_H */
@@ -14,12 +14,14 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "efx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mcdi_port_common.h"
#include "selftest.h"
#include "siena_sriov.h"
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include "net_driver.h"
#include "efx.h"
#include "efx_channels.h"
#include "nic.h"
#include "io.h"
#include "mcdi.h"
@@ -20,6 +20,7 @@
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"
@@ -56,72 +57,6 @@ u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
	return efx_tx_get_copy_buffer(tx_queue, buffer);
}

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
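The budget computed by efx_tx_max_skb_descs() above is plain arithmetic, and it is worth seeing with numbers. A standalone sketch; EFX_TSO_MAX_SEGS = 100 and MAX_SKB_FRAGS = 17 are assumed values for illustration, not the driver's definitions:

/* Worked example of the efx_tx_max_skb_descs() arithmetic.  The constants
 * below are assumptions for illustration only.  The PCIe-boundary term is
 * omitted: it only applies when the host page size exceeds the NIC's page.
 */
#include <stdio.h>

#define EFX_TSO_MAX_SEGS 100	/* assumed */
#define MAX_SKB_FRAGS	 17	/* assumed */

int main(void)
{
	/* Header + payload descriptor per segment, plus fragment boundaries */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	printf("base:         %u\n", max_descs);	/* 217 */

	/* One option descriptor per segment on EF10 (Huntington and later) */
	max_descs += EFX_TSO_MAX_SEGS;
	printf("with options: %u\n", max_descs);	/* 317 */
	return 0;
}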
@@ -333,125 +268,6 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
}
#endif /* EFX_USE_PIO */

static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
					      dma_addr_t dma_addr,
					      size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

/* Map all data from an SKB for DMA and create descriptors on the queue.
 */
static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			   unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header in to a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment,
					    0, len, DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			       unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}
}

/*
 * Fallback to software TSO.
 *
@@ -473,12 +289,9 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
	dev_consume_skb_any(skb);
	skb = segments;

	while (skb) {
		next = skb->next;
		skb->next = NULL;

	skb_list_walk_safe(skb, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
		skb = next;
	}

	return 0;
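The conversion above works because skb_list_walk_safe() caches the next pointer before the loop body runs, so unlinking the current skb with skb_mark_not_on_list() cannot break the walk. A userspace sketch of the same safe-walk idiom; the node type and macro are illustrative, not the kernel's definitions:

/* Userspace sketch of the skb_list_walk_safe() idiom: grab ->next before
 * the body is allowed to unlink the current node.
 */
#include <stdio.h>
#include <stddef.h>

struct node { int id; struct node *next; };

#define list_walk_safe(head, pos, n) \
	for ((pos) = (head), (n) = (pos) ? (pos)->next : NULL; \
	     (pos); \
	     (pos) = (n), (n) = (pos) ? (pos)->next : NULL)

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *pos, *n;

	list_walk_safe(&a, pos, n) {
		pos->next = NULL;	/* "mark not on list", like the skb case */
		printf("visiting %d\n", pos->id);
	}
	return 0;
}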
@@ -687,41 +500,6 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
	return i;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission. We use one channel per CPU
 * (sharing when we have more CPUs than channels). On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
@@ -834,173 +612,3 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
	net_dev->num_tc = num_tc;
	return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);

	/* Set up default function pointers. These may get replaced by
	 * efx_nic_init_tx() based off NIC/queue capabilities.
	 */
	tx_queue->handle_tso = efx_enqueue_skb_tso;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}
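Throughout the queue code above, ring sizes are rounded up to a power of two so that ptr_mask = entries - 1 turns a free-running counter into a slot index, and unsigned wraparound keeps insert_count - read_count equal to the fill level even across 2^32. A standalone sketch with invented counter values:

/* Standalone sketch of the power-of-two ring indexing used by the TX path.
 * The counter values are invented; the masking and fill-level arithmetic
 * match the pattern above.
 */
#include <stdio.h>

int main(void)
{
	unsigned int entries = 512;		/* must be a power of two */
	unsigned int ptr_mask = entries - 1;
	unsigned int insert_count = 0xfffffffeu;	/* about to wrap */
	unsigned int read_count = 0xfffffff0u;

	printf("slot:       %u\n", insert_count & ptr_mask);	/* 510 */
	printf("fill level: %u\n", insert_count - read_count);	/* 14, even near wrap */
	return 0;
}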
404 drivers/net/ethernet/sfc/tx_common.c Normal file
@@ -0,0 +1,404 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "tx_common.h"

static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);

	/* Set up default function pointers. These may get replaced by
	 * efx_nic_init_tx() based off NIC/queue capabilities.
	 */
	tx_queue->handle_tso = efx_enqueue_skb_tso;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}

void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}
}

struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header in to a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}
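efx_tx_cb_page_count() above sizes the copy-buffer array: each page holds PAGE_SIZE >> EFX_TX_CB_ORDER buffers, so the ring needs ceil(entries / per_page) pages. A standalone sketch; EFX_TX_CB_ORDER = 7 (128-byte copy buffers) is an assumed value for illustration:

/* Worked example of the efx_tx_cb_page_count() arithmetic.  The order and
 * page size below are assumptions, not the driver's definitions.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int page_size = 4096;
	unsigned int cb_order = 7;			/* assumed: 128-byte copy buffers */
	unsigned int entries = 1024;			/* ptr_mask + 1 */
	unsigned int per_page = page_size >> cb_order;	/* 32 buffers per page */

	printf("cb pages: %u\n", DIV_ROUND_UP(entries, per_page));	/* 32 */
	return 0;
}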
36 drivers/net/ethernet/sfc/tx_common.h Normal file
@@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#ifndef EFX_TX_COMMON_H
#define EFX_TX_COMMON_H

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);

void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl);

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);

void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			unsigned int insert_count);

struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len);
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count);

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);

#endif