Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next

Pull networking updates from David Miller:
 "Highlights:

   1) Fix the iwlwifi regression, from Johannes Berg.

   2) Support BSS coloring and 802.11 encapsulation offloading in
      hardware, from John Crispin.

   3) Fix some potential Spectre issues in qtnfmac, from Sergey
      Matyukevich.

   4) Add TTL decrement action to openvswitch, from Matteo Croce.

   5) Allow parallelization through flow_action setup by not taking the
      RTNL mutex, from Vlad Buslov.

   6) A lot of zero-length array to flexible-array conversions, from
      Gustavo A. R. Silva.

   7) Align XDP statistics names across several drivers for consistency,
      from Lorenzo Bianconi.

   8) Add various pieces of infrastructure for offloading conntrack, and
      make use of it in mlx5 driver, from Paul Blakey.

   9) Allow using listening sockets in BPF sockmap, from Jakub Sitnicki.

  10) Lots of parallelization improvements during configuration changes
      in mlxsw driver, from Ido Schimmel.

  11) Add support to devlink for generic packet traps, which report
      packets dropped during ACL processing. And use them in mlxsw
      driver. From Jiri Pirko.

  12) Support bcmgenet on ACPI, from Jeremy Linton.

  13) Make BPF compatible with RT, from Thomas Gleixner, Alexei
      Starovoitov, and yours truly.

  14) Support XDP meta-data in virtio_net, from Yuya Kusakabe.

  15) Fix sysfs permissions when network devices change namespaces, from
      Christian Brauner.

  16) Add a flags element to ethtool_ops so that drivers can more simply
      indicate which coalescing parameters they actually support, and
      therefore the generic layer can validate the user's ethtool
      request. Use this in all drivers, from Jakub Kicinski.

  17) Offload FIFO qdisc in mlxsw, from Petr Machata.

  18) Support UDP sockets in sockmap, from Lorenz Bauer.

  19) Fix stretch ACK bugs in several TCP congestion control modules,
      from Pengcheng Yang.

  20) Support virtual functions in octeontx2 driver, from Tomasz
      Duszynski.

  21) Add region operations for devlink and use it in ice driver to dump
      NVM contents, from Jacob Keller.

  22) Add support for hw offload of MACsec, from Antoine Tenart.

  23) Add support for BPF programs that can be attached to LSM hooks,
      from KP Singh.

  24) Support for multiple paths, path managers, and counters in MPTCP.
      From Peter Krystad, Paolo Abeni, Florian Westphal, Davide Caratti,
      and others.

  25) More progress on adding the netlink interface to ethtool, from
      Michal Kubecek"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2121 commits)
  net: ipv6: rpl_iptunnel: Fix potential memory leak in rpl_do_srh_inline
  cxgb4/chcr: nic-tls stats in ethtool
  net: dsa: fix oops while probing Marvell DSA switches
  net/bpfilter: remove superfluous testing message
  net: macb: Fix handling of fixed-link node
  net: dsa: ksz: Select KSZ protocol tag
  netdevsim: dev: Fix memory leak in nsim_dev_take_snapshot_write
  net: stmmac: add EHL 2.5Gbps PCI info and PCI ID
  net: stmmac: add EHL PSE0 & PSE1 1Gbps PCI info and PCI ID
  net: stmmac: create dwmac-intel.c to contain all Intel platform
  net: dsa: bcm_sf2: Support specifying VLAN tag egress rule
  net: dsa: bcm_sf2: Add support for matching VLAN TCI
  net: dsa: bcm_sf2: Move writing of CFP_DATA(5) into slicing functions
  net: dsa: bcm_sf2: Check earlier for FLOW_EXT and FLOW_MAC_EXT
  net: dsa: bcm_sf2: Disable learning for ASP port
  net: dsa: b53: Deny enslaving port 7 for 7278 into a bridge
  net: dsa: b53: Prevent tagged VLAN on port 7 for 7278
  net: dsa: b53: Restore VLAN entries upon (re)configuration
  net: dsa: bcm_sf2: Fix overflow checks
  hv_netvsc: Remove unnecessary round_up for recv_completion_cnt
  ...
Linus Torvalds committed 2020-03-31 17:29:33 -07:00
1772 files changed, 113891 insertions(+), 26545 deletions(-)


@@ -294,6 +294,7 @@ config ICE
tristate "Intel(R) Ethernet Connection E800 Series Support"
default n
depends on PCI_MSI
select NET_DEVLINK
---help---
This driver supports Intel(R) Ethernet Connection E800 Series of
devices. For more information on how to identify your adapter, go


@@ -1852,6 +1852,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops e1000_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,
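This ethtool_ops change is the e1000 instance of highlight 16: each driver declares, once, which coalescing parameters it honors, and the ethtool core rejects any request that touches other fields before the driver callback ever runs. A minimal sketch of the idiom, with hypothetical foo_ names:

/* Sketch only: a driver that supports rx-usecs plus adaptive RX.
 * The core compares the user's ethtool -C request against this
 * bitmap and fails early for anything else, so per-driver
 * validation boilerplate can be deleted.
 */
static const struct ethtool_ops foo_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_coalesce = foo_get_coalesce,
	.set_coalesce = foo_set_coalesce,
};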


@@ -2715,11 +2715,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
tcp_v6_gso_csum_prep(skb);
ipcse = 0;
}
ipcss = skb_network_offset(skb);
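This hunk (and the matching e1000e one further down) folds four lines of open-coded IPv6 pseudo-header checksum setup into the new tcp_v6_gso_csum_prep() helper from include/net/ip6_checksum.h. The helper consolidates exactly the deleted sequence; a close paraphrase of its body, not a verbatim quote:

static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	/* zero payload_len and seed th->check with the IPv6
	 * pseudo-header, as the removed driver code did by hand
	 */
	ipv6h->payload_len = 0;
	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
}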


@@ -897,6 +897,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_cnp:
/* fall through */
case e1000_pch_tgp:
case e1000_pch_adp:
mask |= BIT(18);
break;
default:
@@ -1561,6 +1562,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
@@ -2305,6 +2307,7 @@ static int e1000e_get_ts_info(struct net_device *netdev,
}
static const struct ethtool_ops e1000_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,


@@ -97,6 +97,11 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_TGP_I219_LM14 0x15F9
#define E1000_DEV_ID_PCH_TGP_I219_V14 0x15FA
#define E1000_DEV_ID_PCH_TGP_I219_LM15 0x15F4
#define E1000_DEV_ID_PCH_TGP_I219_V15 0x15F5
#define E1000_DEV_ID_PCH_ADP_I219_LM16 0x1A1E
#define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F
#define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C
#define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D
#define E1000_REVISION_4 4
@@ -121,6 +126,7 @@ enum e1000_mac_type {
e1000_pch_spt,
e1000_pch_cnp,
e1000_pch_tgp,
e1000_pch_adp,
};
enum e1000_media_type {


@@ -317,6 +317,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -460,6 +461,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -703,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1642,6 +1645,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2095,6 +2099,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3133,6 +3138,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4077,6 +4083,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;


@@ -1363,7 +1363,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (!(swsm & E1000_SWSM_SMBI))
break;
usleep_range(50, 100);
udelay(100);
i++;
}
@@ -1381,7 +1381,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (er32(SWSM) & E1000_SWSM_SWESMBI)
break;
usleep_range(50, 100);
udelay(100);
}
if (i == timeout) {
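Background for this e1000e hunk: usleep_range() may sleep, which is only legal in process context, whereas udelay() busy-waits and is safe where sleeping is forbidden (under spinlocks, in IRQ context). A generic illustration of the rule, not taken from this driver (some_lock is hypothetical):

spin_lock(&some_lock);		/* atomic context begins */
udelay(100);			/* fine: pure busy-wait */
/* usleep_range(50, 100) here would be a sleep-in-atomic bug */
spin_unlock(&some_lock);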


@@ -3536,6 +3536,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
break;
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -3807,7 +3808,7 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
tdt = er32(TDT(0));
BUG_ON(tdt != tx_ring->next_to_use);
tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
tx_desc->buffer_addr = tx_ring->dma;
tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma);
tx_desc->lower.data = cpu_to_le32(txd_lower | size);
tx_desc->upper.data = 0;
@@ -4049,6 +4050,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_cnp:
/* fall-through */
case e1000_pch_tgp:
case e1000_pch_adp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -5461,10 +5463,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
tcp_v6_gso_csum_prep(skb);
ipcse = 0;
}
ipcss = skb_network_offset(skb);
@@ -7759,6 +7758,11 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};


@@ -297,6 +297,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_cnp:
/* fall-through */
case e1000_pch_tgp:
case e1000_pch_adp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;


@@ -41,7 +41,7 @@ struct fm10k_l2_accel {
u16 count;
u16 dglort;
struct rcu_head rcu;
struct net_device *macvlan[0];
struct net_device *macvlan[];
};
enum fm10k_ring_state_t {
@@ -198,7 +198,7 @@ struct fm10k_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
/* for dynamic allocation of rings associated with this q_vector */
struct fm10k_ring ring[0] ____cacheline_internodealigned_in_smp;
struct fm10k_ring ring[] ____cacheline_internodealigned_in_smp;
};
enum fm10k_ring_f_enum {
@@ -218,7 +218,7 @@ struct fm10k_iov_data {
unsigned int num_vfs;
unsigned int next_vf_mbx;
struct rcu_head rcu;
struct fm10k_vf_info vf_info[0];
struct fm10k_vf_info vf_info[];
};
struct fm10k_udp_port {
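These fm10k conversions are instances of highlight 6: C99 flexible array members ([]) replacing the old GCC zero-length-array extension ([0]). The companion allocation idiom uses struct_size() from <linux/overflow.h>, which folds overflow checking into the size computation. A sketch (the count variable is hypothetical):

struct fm10k_l2_accel *l2_accel;

/* sizeof(*l2_accel) + count * sizeof(l2_accel->macvlan[0]),
 * checked for arithmetic overflow
 */
l2_accel = kzalloc(struct_size(l2_accel, macvlan, count), GFP_KERNEL);
if (!l2_accel)
	return -ENOMEM;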


@@ -1151,6 +1151,8 @@ static int fm10k_set_channels(struct net_device *dev,
}
static const struct ethtool_ops fm10k_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_strings = fm10k_get_strings,
.get_sset_count = fm10k_get_sset_count,
.get_ethtool_stats = fm10k_get_ethtool_stats,


@@ -334,13 +334,13 @@ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
struct i40e_ddp_profile_list {
u32 p_count;
struct i40e_profile_info p_info[0];
struct i40e_profile_info p_info[];
};
struct i40e_ddp_old_profile_list {
struct list_head list;
size_t old_ddp_size;
u8 old_ddp_buf[0];
u8 old_ddp_buf[];
};
/* macros related to FLX_PIT */


@@ -5249,6 +5249,11 @@ static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
};
static const struct ethtool_ops i40e_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH |
ETHTOOL_COALESCE_TX_USECS_HIGH,
.get_drvinfo = i40e_get_drvinfo,
.get_regs_len = i40e_get_regs_len,
.get_regs = i40e_get_regs,


@@ -81,7 +81,7 @@ struct iavf_vsi {
#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
#define IAVF_TX_CTXTDESC(R, i) \
(&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
#define IAVF_MAX_REQ_QUEUES 4
#define IAVF_MAX_REQ_QUEUES 16
#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)


@@ -860,7 +860,7 @@ static void iavf_get_channels(struct net_device *netdev,
struct iavf_adapter *adapter = netdev_priv(netdev);
/* Report maximum channels */
ch->max_combined = IAVF_MAX_REQ_QUEUES;
ch->max_combined = adapter->vsi_res->num_queue_pairs;
ch->max_other = NONQ_VECS;
ch->other_count = NONQ_VECS;
@@ -881,14 +881,7 @@ static int iavf_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
int num_req = ch->combined_count;
if (num_req != adapter->num_active_queues &&
!(adapter->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n");
return -EINVAL;
}
u32 num_req = ch->combined_count;
if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
adapter->num_tc) {
@@ -899,14 +892,19 @@ static int iavf_set_channels(struct net_device *netdev,
/* All of these should have already been checked by ethtool before this
* even gets to us, but just to be sure.
*/
if (num_req <= 0 || num_req > IAVF_MAX_REQ_QUEUES)
if (num_req > adapter->vsi_res->num_queue_pairs)
return -EINVAL;
if (num_req == adapter->num_active_queues)
return 0;
if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
return -EINVAL;
adapter->num_req_queues = num_req;
return iavf_request_queues(adapter, num_req);
adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_schedule_reset(adapter);
return 0;
}
/**
@@ -998,6 +996,10 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
}
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,


@@ -3061,9 +3061,6 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
struct flow_cls_offload *cls_flower)
{
if (cls_flower->common.chain_index)
return -EOPNOTSUPP;
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
return iavf_configure_clsflower(adapter, cls_flower);
@@ -3087,6 +3084,11 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
struct iavf_adapter *adapter = cb_priv;
if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSFLOWER:
return iavf_setup_tc_cls_flower(cb_priv, type_data);
@@ -3448,7 +3450,7 @@ int iavf_process_config(struct iavf_adapter *adapter)
}
if (num_req_queues &&
num_req_queues != adapter->vsi_res->num_queue_pairs) {
num_req_queues > adapter->vsi_res->num_queue_pairs) {
/* Problem. The PF gave us fewer queues than what we had
* negotiated in our request. Need a reset to see if we can't
* get back to a working state.


@@ -396,33 +396,6 @@ void iavf_map_queues(struct iavf_adapter *adapter)
kfree(vimi);
}
/**
* iavf_request_queues
* @adapter: adapter structure
* @num: number of requested queues
*
* We get a default number of queues from the PF. This enables us to request a
* different number. Returns 0 on success, negative on failure
**/
int iavf_request_queues(struct iavf_adapter *adapter, int num)
{
struct virtchnl_vf_res_request vfres;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
adapter->current_op);
return -EBUSY;
}
vfres.num_queue_pairs = min_t(int, num, num_online_cpus());
adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
return iavf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
(u8 *)&vfres, sizeof(vfres));
}
/**
* iavf_add_ether_addrs
* @adapter: adapter structure


@@ -19,6 +19,7 @@ ice-y := ice_main.o \
ice_txrx.o \
ice_flex_pipe.o \
ice_flow.o \
ice_devlink.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o


@@ -34,6 +34,7 @@
#include <linux/ctype.h>
#include <linux/bpf.h>
#include <linux/avf/virtchnl.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
#include "ice_devids.h"
@@ -60,7 +61,6 @@ extern const char ice_drv_ver[];
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
#define ICE_MBXRQ_LEN 512
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
@@ -70,7 +70,6 @@ extern const char ice_drv_ver[];
#define ICE_Q_WAIT_RETRY_LIMIT 10
#define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS 256
#define ICE_MAX_SMALL_RSS_QS 8
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
@@ -212,6 +211,8 @@ enum ice_state {
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
__ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
__ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */
__ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
__ICE_STATE_NBITS /* must be last */
};
@@ -340,12 +341,18 @@ enum ice_pf_flags {
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_PF_FLAGS_NBITS /* must be last */
};
struct ice_pf {
struct pci_dev *pdev;
/* devlink port data */
struct devlink_port devlink_port;
struct devlink_region *nvm_region;
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
struct ice_res_tracker *irq_tracker;
@@ -361,8 +368,10 @@ struct ice_pf {
struct ice_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
u16 num_vfs_supported; /* num VFs supported for this PF */
u16 num_vf_qps; /* num queue pairs per VF */
u16 num_vf_msix; /* num vectors per VF */
u16 num_qps_per_vf;
u16 num_msix_per_vf;
/* used to ratelimit the MDD event logging */
unsigned long last_printed_mdd_jiffies;
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */


@@ -1232,6 +1232,7 @@ struct ice_aqc_sff_eeprom {
* NVM Update commands (indirect 0x0703)
*/
struct ice_aqc_nvm {
#define ICE_AQC_NVM_MAX_OFFSET 0xFFFFFF
__le16 offset_low;
u8 offset_high;
u8 cmd_flags;
@@ -1250,6 +1251,8 @@ struct ice_aqc_nvm {
__le32 addr_low;
};
#define ICE_AQC_NVM_START_POINT 0
/* NVM Checksum Command (direct, 0x0706) */
struct ice_aqc_nvm_checksum {
u8 flags;
@@ -1661,6 +1664,13 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[1];
};
/* Lan Queue Overflow Event (direct, 0x1001) */
struct ice_aqc_event_lan_overflow {
__le32 prtdcb_ruptq;
__le32 qtx_ctl;
u8 reserved[8];
};
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -1730,6 +1740,7 @@ struct ice_aq_desc {
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
} params;
};
@@ -1756,6 +1767,7 @@ enum ice_aq_err {
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
ICE_AQ_RC_EINVAL = 14, /* Invalid argument */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
@@ -1860,6 +1872,9 @@ enum ice_adminq_opc {
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
/* debug commands */
ice_aqc_opc_fw_logging = 0xFF09,
ice_aqc_opc_fw_logging_info = 0xFF10,


@@ -203,8 +203,7 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
*/
static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
{
WARN_ONCE(ice_ring_is_xdp(ring) && tc,
"XDP ring can't belong to TC other than 0");
WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
/* Idea here for calculation is that we subtract the number of queue
* count from TC that ring belongs to from its absolute queue index
@@ -247,7 +246,6 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*/
switch (vsi->type) {
case ICE_VSI_LB:
/* fall through */
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
@@ -387,8 +385,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format
*/
regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
if (vsi->type != ICE_VSI_VF) {
regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
QRXFLXP_CNTXT_RXDID_IDX_M;
@@ -399,8 +397,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
} else {
regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
QRXFLXP_CNTXT_RXDID_PRIO_M |
QRXFLXP_CNTXT_TS_M);
}
wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -459,17 +461,20 @@ int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
}
/**
* ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
* ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
* @vsi: the VSI being configured
* @ena: start or stop the Rx rings
* @rxq_idx: Rx queue index
* @ena: start or stop the Rx ring
* @rxq_idx: 0-based Rx queue index for the VSI passed in
* @wait: wait or don't wait for configuration to finish in hardware
*
* Return 0 on success and negative on error.
*/
int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
int pf_q = vsi->rxq_map[rxq_idx];
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
int ret = 0;
u32 rx_reg;
rx_reg = rd32(hw, QRX_CTRL(pf_q));
@@ -485,13 +490,30 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
rx_reg &= ~QRX_CTRL_QENA_REQ_M;
wr32(hw, QRX_CTRL(pf_q), rx_reg);
/* wait for the change to finish */
ret = ice_pf_rxq_wait(pf, pf_q, ena);
if (ret)
dev_err(ice_pf_to_dev(pf), "VSI idx %d Rx ring %d %sable timeout\n",
vsi->idx, pf_q, (ena ? "en" : "dis"));
if (!wait)
return 0;
return ret;
ice_flush(hw);
return ice_pf_rxq_wait(pf, pf_q, ena);
}
/**
* ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
* @vsi: the VSI being configured
* @ena: true/false to verify Rx ring has been enabled/disabled respectively
* @rxq_idx: 0-based Rx queue index for the VSI passed in
*
* This routine will wait for the given Rx queue of the VSI to reach the
* enabled or disabled state. Returns -ETIMEDOUT in case of failing to reach
* the requested state after multiple retries; else will return 0 in case of
* success.
*/
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
int pf_q = vsi->rxq_map[rxq_idx];
struct ice_pf *pf = vsi->back;
return ice_pf_rxq_wait(pf, pf_q, ena);
}
/**
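Splitting the ring start/stop from the wait lets a caller kick off every Rx ring first and only then poll each one for completion, rather than busy-waiting per queue. A sketch of the intended caller pattern (error handling is illustrative):

int i, err;

/* request the state change on all rings without waiting ... */
ice_for_each_rxq(vsi, i)
	ice_vsi_ctrl_one_rx_ring(vsi, true, i, false);

ice_flush(&vsi->back->hw);

/* ... then confirm each ring actually reached it */
ice_for_each_rxq(vsi, i) {
	err = ice_vsi_wait_one_rx_ring(vsi, true, i);
	if (err)
		return err;
}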


@@ -8,7 +8,9 @@
int ice_setup_rx_ctx(struct ice_ring *ring);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);


@@ -6,7 +6,7 @@
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#define ICE_PF_RESET_WAIT_COUNT 200
#define ICE_PF_RESET_WAIT_COUNT 300
/**
* ice_set_mac_type - Sets MAC type
@@ -614,29 +614,6 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
}
/**
* ice_get_nvm_version - get cached NVM version data
* @hw: pointer to the hardware structure
* @oem_ver: 8 bit NVM version
* @oem_build: 16 bit NVM build number
* @oem_patch: 8 bit NVM patch number
* @ver_hi: high 16 bits of the NVM version
* @ver_lo: low 16 bits of the NVM version
*/
void
ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
{
struct ice_nvm_info *nvm = &hw->nvm;
*oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
*oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
*oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
ICE_OEM_VER_BUILD_SHIFT);
*ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
*ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
}
/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
@@ -957,72 +934,6 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
return ice_check_reset(hw);
}
/**
* ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
* @hw: pointer to hardware structure
* @module_tlv: pointer to module TLV to return
* @module_tlv_len: pointer to module TLV length to return
* @module_type: module type requested
*
* Finds the requested sub module TLV type from the Preserved Field
* Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value.
*/
enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
enum ice_status status;
u16 pfa_len, pfa_ptr;
u16 next_tlv;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n");
return status;
}
status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
return status;
}
/* Starting with first TLV after PFA length, iterate through the list
* of TLVs to find the requested one.
*/
next_tlv = pfa_ptr + 1;
while (next_tlv < pfa_ptr + pfa_len) {
u16 tlv_sub_module_type;
u16 tlv_len;
/* Read TLV type */
status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
break;
}
/* Read TLV length */
status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
break;
}
if (tlv_sub_module_type == module_type) {
if (tlv_len) {
*module_tlv = next_tlv;
*module_tlv_len = tlv_len;
return 0;
}
return ICE_ERR_INVAL_SIZE;
}
/* Check next TLV, i.e. current TLV pointer + length + 2 words
* (for current TLV's type and length)
*/
next_tlv = next_tlv + tlv_len + 2;
}
/* Module does not exist */
return ICE_ERR_DOES_NOT_EXIST;
}
/**
* ice_copy_rxq_ctx_to_hw
* @hw: pointer to the hardware structure
@@ -1181,7 +1092,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
case ice_aqc_opc_release_res:
if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
break;
/* fall-through */
fallthrough;
default:
mutex_lock(&ice_global_cfg_lock_sw);
lock_acquired = true;
@@ -2703,7 +2614,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
break;
}
/* fall-through */
fallthrough;
default:
status = ICE_ERR_PARAM;
goto ice_aq_get_set_rss_lut_exit;


@@ -15,9 +15,6 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type);
enum ice_status ice_check_reset(struct ice_hw *hw);
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
@@ -38,9 +35,6 @@ enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
enum ice_status
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
@@ -153,9 +147,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
void
ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
u8 *oem_patch, u8 *ver_hi, u8 *ver_lo);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf);


@@ -62,6 +62,26 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
return ena_tc;
}
/**
* ice_dcb_get_mode - gets the DCB mode
* @port_info: pointer to port info structure
* @host: if set it's HOST if not it's MANAGED
*/
static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
{
u8 mode;
if (host)
mode = DCB_CAP_DCBX_HOST;
else
mode = DCB_CAP_DCBX_LLD_MANAGED;
if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
return mode | DCB_CAP_DCBX_VER_CEE;
else
return mode | DCB_CAP_DCBX_VER_IEEE;
}
/**
* ice_dcb_get_num_tc - Get the number of TCs from DCBX config
* @dcbcfg: config to retrieve number of TCs from
@@ -148,6 +168,43 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
}
}
/**
* ice_dcb_bwchk - check if ETS bandwidth input parameters are correct
* @pf: pointer to the PF struct
* @dcbcfg: pointer to DCB config structure
*/
int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg)
{
struct ice_dcb_ets_cfg *etscfg = &dcbcfg->etscfg;
u8 num_tc, total_bw = 0;
int i;
/* returns number of contiguous TCs and 1 TC for non-contiguous TCs,
* since at least 1 TC has to be configured
*/
num_tc = ice_dcb_get_num_tc(dcbcfg);
/* no bandwidth checks required if there's only one TC, so assign
* all bandwidth to TC0 and return
*/
if (num_tc == 1) {
etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
return 0;
}
for (i = 0; i < num_tc; i++)
total_bw += etscfg->tcbwtable[i];
if (!total_bw) {
etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
} else if (total_bw != ICE_TC_MAX_BW) {
dev_err(ice_pf_to_dev(pf), "Invalid config, total bandwidth must equal 100\n");
return -EINVAL;
}
return 0;
}
/**
* ice_pf_dcb_cfg - Apply new DCB configuration
* @pf: pointer to the PF struct
@@ -182,6 +239,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
return ret;
}
if (ice_dcb_bwchk(pf, new_cfg))
return -EINVAL;
/* Store old config in case FW config fails */
old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL);
if (!old_cfg)
@@ -605,14 +665,14 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
ice_cfg_sw_lldp(pf_vsi, false, true);
pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
pf->dcbx_cap = ice_dcb_get_mode(port_info, true);
return 0;
}
set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
/* DCBX in FW and LLDP enabled in FW */
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
/* DCBX/LLDP enabled in FW, set DCBNL mode advertisement */
pf->dcbx_cap = ice_dcb_get_mode(port_info, false);
err = ice_dcb_init_cfg(pf, locked);
if (err)
@@ -719,7 +779,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
bool need_reconfig = false;
struct ice_port_info *pi;
struct ice_vsi *pf_vsi;
u8 type;
u8 mib_type;
int ret;
/* Not DCB capable or capability disabled */
@@ -734,16 +794,16 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
pi = pf->hw.port_info;
mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
/* Ignore if event is not for Nearest Bridge */
type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
ICE_AQ_LLDP_BRID_TYPE_M);
dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", type);
if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
ICE_AQ_LLDP_BRID_TYPE_M);
dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type);
if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
return;
/* Check MIB Type and return if event for Remote MIB update */
type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
dev_dbg(dev, "LLDP event mib type %s\n", type ? "remote" : "local");
if (type == ICE_AQ_LLDP_MIB_REMOTE) {
mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local");
if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
@@ -775,6 +835,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
goto out;
}
pf->dcbx_cap = ice_dcb_get_mode(pi, false);
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
&pi->local_dcbx_cfg);
ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);


@@ -20,6 +20,7 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg);
void ice_pf_dcb_recfg(struct ice_pf *pf);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);


@@ -95,14 +95,12 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
}
/* max_tc is a 1-8 value count of number of TC's, not a 0-7 value
* for the TC's index number. Add one to value if not zero, and
* for zero set it to the FW's default value
*/
if (max_tc)
max_tc++;
else
max_tc = IEEE_8021QAZ_MAX_TCS;
if (ice_dcb_bwchk(pf, new_cfg)) {
err = -EINVAL;
goto ets_out;
}
max_tc = pf->hw.func_caps.common_cap.maxtc;
new_cfg->etscfg.maxtcs = max_tc;
@@ -119,6 +117,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
if (err == ICE_DCB_NO_HW_CHG)
err = ICE_DCB_HW_CHG_RST;
ets_out:
mutex_unlock(&pf->tc_mutex);
return err;
}
@@ -534,6 +533,30 @@ ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
*pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
}
/**
* ice_dcbnl_set_pg_tc_cfg_rx
* @netdev: relevant netdev struct
* @prio: corresponding user priority
* @prio_type: the traffic priority type
* @pgid: the PG ID
* @bw_pct: BW percentage for corresponding BWG
* @up_map: prio mapped to corresponding TC
*
* lldpad requires this function pointer to be non-NULL to complete CEE config.
*/
static void
ice_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev,
int __always_unused prio,
u8 __always_unused prio_type,
u8 __always_unused pgid,
u8 __always_unused bw_pct,
u8 __always_unused up_map)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
dev_dbg(ice_pf_to_dev(pf), "Rx TC PG Config Not Supported.\n");
}
/**
* ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
* @netdev: pointer to netdev struct
@@ -553,6 +576,23 @@ ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
*bw_pct = 0;
}
/**
* ice_dcbnl_set_pg_bwg_cfg_rx
* @netdev: the corresponding netdev
* @pgid: corresponding TC
* @bw_pct: BW percentage for given TC
*
* lldpad requires this function pointer to be non-NULL to complete CEE config.
*/
static void
ice_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
u8 __always_unused bw_pct)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
dev_dbg(ice_pf_to_dev(pf), "Rx BWG PG Config Not Supported.\n");
}
/**
* ice_dcbnl_get_cap - Get DCBX capabilities of adapter
* @netdev: pointer to netdev struct
@@ -799,6 +839,8 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = {
.getpermhwaddr = ice_dcbnl_get_perm_hw_addr,
.setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx,
.setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx,
.setpgtccfgrx = ice_dcbnl_set_pg_tc_cfg_rx,
.setpgbwgcfgrx = ice_dcbnl_set_pg_bwg_cfg_rx,
.getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx,
.getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx,
.getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx,


@@ -5,12 +5,34 @@
#define _ICE_DEVIDS_H_
/* Device IDs */
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
#define ICE_DEV_ID_E823L_SFP 0x124D
/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E823L_10G_BASE_T 0x124E
/* Intel(R) Ethernet Connection E823-L 1GbE */
#define ICE_DEV_ID_E823L_1GBE 0x124F
/* Intel(R) Ethernet Connection E823-L for QSFP */
#define ICE_DEV_ID_E823L_QSFP 0x151D
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
/* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E823-C for backplane */
#define ICE_DEV_ID_E823C_BACKPLANE 0x188A
/* Intel(R) Ethernet Connection E823-C for QSFP */
#define ICE_DEV_ID_E823C_QSFP 0x188B
/* Intel(R) Ethernet Connection E823-C for SFP */
#define ICE_DEV_ID_E823C_SFP 0x188C
/* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E823C_10G_BASE_T 0x188D
/* Intel(R) Ethernet Connection E823-C 1GbE */
#define ICE_DEV_ID_E823C_SGMII 0x188E
/* Intel(R) Ethernet Connection E822-C for backplane */
#define ICE_DEV_ID_E822C_BACKPLANE 0x1890
/* Intel(R) Ethernet Connection E822-C for QSFP */
@@ -21,8 +43,8 @@
#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
/* Intel(R) Ethernet Connection E822-C 1GbE */
#define ICE_DEV_ID_E822C_SGMII 0x1894
/* Intel(R) Ethernet Connection E822-X for backplane */
#define ICE_DEV_ID_E822X_BACKPLANE 0x1897
/* Intel(R) Ethernet Connection E822-L for backplane */
#define ICE_DEV_ID_E822L_BACKPLANE 0x1897
/* Intel(R) Ethernet Connection E822-L for SFP */
#define ICE_DEV_ID_E822L_SFP 0x1898
/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */


@@ -0,0 +1,416 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
static int ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
{
u8 dsn[8];
/* Copy the DSN into an array in Big Endian format */
put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);
snprintf(buf, len, "%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
dsn[0], dsn[1], dsn[2], dsn[3],
dsn[4], dsn[5], dsn[6], dsn[7]);
return 0;
}
static int ice_info_pba(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_hw *hw = &pf->hw;
enum ice_status status;
status = ice_read_pba_string(hw, (u8 *)buf, len);
if (status)
return -EIO;
return 0;
}
static int ice_info_fw_mgmt(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_hw *hw = &pf->hw;
snprintf(buf, len, "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver,
hw->fw_patch);
return 0;
}
static int ice_info_fw_api(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_hw *hw = &pf->hw;
snprintf(buf, len, "%u.%u", hw->api_maj_ver, hw->api_min_ver);
return 0;
}
static int ice_info_fw_build(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_hw *hw = &pf->hw;
snprintf(buf, len, "0x%08x", hw->fw_build);
return 0;
}
static int ice_info_orom_ver(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_orom_info *orom = &pf->hw.nvm.orom;
snprintf(buf, len, "%u.%u.%u", orom->major, orom->build, orom->patch);
return 0;
}
static int ice_info_nvm_ver(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_nvm_info *nvm = &pf->hw.nvm;
snprintf(buf, len, "%x.%02x", nvm->major_ver, nvm->minor_ver);
return 0;
}
static int ice_info_eetrack(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_nvm_info *nvm = &pf->hw.nvm;
snprintf(buf, len, "0x%08x", nvm->eetrack);
return 0;
}
static int ice_info_ddp_pkg_name(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_hw *hw = &pf->hw;
snprintf(buf, len, "%s", hw->active_pkg_name);
return 0;
}
static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;
snprintf(buf, len, "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update,
pkg->draft);
return 0;
}
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter }
enum ice_version_type {
ICE_VERSION_FIXED,
ICE_VERSION_RUNNING,
ICE_VERSION_STORED,
};
static const struct ice_devlink_version {
enum ice_version_type type;
const char *key;
int (*getter)(struct ice_pf *pf, char *buf, size_t len);
} ice_devlink_versions[] = {
fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
running("fw.mgmt.api", ice_info_fw_api),
running("fw.mgmt.build", ice_info_fw_build),
running(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver),
running("fw.psid.api", ice_info_nvm_ver),
running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
running("fw.app.name", ice_info_ddp_pkg_name),
running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
};
/**
* ice_devlink_info_get - .info_get devlink handler
* @devlink: devlink instance structure
* @req: the devlink info request
* @extack: extended netdev ack structure
*
* Callback for the devlink .info_get operation. Reports information about the
* device.
*
* Return: zero on success or an error code on failure.
*/
static int ice_devlink_info_get(struct devlink *devlink,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
char buf[100];
size_t i;
int err;
err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set driver name");
return err;
}
err = ice_info_get_dsn(pf, buf, sizeof(buf));
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to obtain serial number");
return err;
}
err = devlink_info_serial_number_put(req, buf);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
return err;
}
for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
enum ice_version_type type = ice_devlink_versions[i].type;
const char *key = ice_devlink_versions[i].key;
err = ice_devlink_versions[i].getter(pf, buf, sizeof(buf));
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
return err;
}
switch (type) {
case ICE_VERSION_FIXED:
err = devlink_info_version_fixed_put(req, key, buf);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
return err;
}
break;
case ICE_VERSION_RUNNING:
err = devlink_info_version_running_put(req, key, buf);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
return err;
}
break;
case ICE_VERSION_STORED:
err = devlink_info_version_stored_put(req, key, buf);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
return err;
}
break;
}
}
return 0;
}
static const struct devlink_ops ice_devlink_ops = {
.info_get = ice_devlink_info_get,
};
static void ice_devlink_free(void *devlink_ptr)
{
devlink_free((struct devlink *)devlink_ptr);
}
/**
* ice_allocate_pf - Allocate devlink and return PF structure pointer
* @dev: the device to allocate for
*
* Allocate a devlink instance for this device and return the private area as
* the PF structure. The devlink memory is kept track of through devres by
* adding an action to remove it when unwinding.
*/
struct ice_pf *ice_allocate_pf(struct device *dev)
{
struct devlink *devlink;
devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf));
if (!devlink)
return NULL;
/* Add an action to teardown the devlink when unwinding the driver */
if (devm_add_action(dev, ice_devlink_free, devlink)) {
devlink_free(devlink);
return NULL;
}
return devlink_priv(devlink);
}
/**
* ice_devlink_register - Register devlink interface for this PF
* @pf: the PF to register the devlink for.
*
* Register the devlink instance associated with this physical function.
*
* Return: zero on success or an error code on failure.
*/
int ice_devlink_register(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
struct device *dev = ice_pf_to_dev(pf);
int err;
err = devlink_register(devlink, dev);
if (err) {
dev_err(dev, "devlink registration failed: %d\n", err);
return err;
}
return 0;
}
/**
* ice_devlink_unregister - Unregister devlink resources for this PF.
* @pf: the PF structure to cleanup
*
* Releases resources used by devlink and cleans up associated memory.
*/
void ice_devlink_unregister(struct ice_pf *pf)
{
devlink_unregister(priv_to_devlink(pf));
}
/**
* ice_devlink_create_port - Create a devlink port for this PF
* @pf: the PF to create a port for
*
* Create and register a devlink_port for this PF. Note that although each
* physical function is connected to a separate devlink instance, the port
* will still be numbered according to the physical function id.
*
* Return: zero on success or an error code on failure.
*/
int ice_devlink_create_port(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct device *dev = ice_pf_to_dev(pf);
int err;
if (!vsi) {
dev_err(dev, "%s: unable to find main VSI\n", __func__);
return -EIO;
}
devlink_port_attrs_set(&pf->devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
pf->hw.pf_id, false, 0, NULL, 0);
err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id);
if (err) {
dev_err(dev, "devlink_port_register failed: %d\n", err);
return err;
}
return 0;
}
/**
* ice_devlink_destroy_port - Destroy the devlink_port for this PF
* @pf: the PF to cleanup
*
* Unregisters the devlink_port structure associated with this PF.
*/
void ice_devlink_destroy_port(struct ice_pf *pf)
{
devlink_port_type_clear(&pf->devlink_port);
devlink_port_unregister(&pf->devlink_port);
}
/**
* ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents
* @devlink: the devlink instance
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
* This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
* the shadow-ram devlink region. It captures a snapshot of the shadow ram
* contents. This snapshot can later be viewed via the devlink-region
* interface.
*
* @returns zero on success, and updates the data pointer. Returns a non-zero
* error code on failure.
*/
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
struct netlink_ext_ack *extack, u8 **data)
{
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
enum ice_status status;
void *nvm_data;
u32 nvm_size;
nvm_size = hw->nvm.flash_size;
nvm_data = vzalloc(nvm_size);
if (!nvm_data)
return -ENOMEM;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status) {
dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
vfree(nvm_data);
return -EIO;
}
status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
if (status) {
dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
nvm_size, status, hw->adminq.sq_last_status);
NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
ice_release_nvm(hw);
vfree(nvm_data);
return -EIO;
}
ice_release_nvm(hw);
*data = nvm_data;
return 0;
}
static const struct devlink_region_ops ice_nvm_region_ops = {
.name = "nvm-flash",
.destructor = vfree,
.snapshot = ice_devlink_nvm_snapshot,
};
/**
* ice_devlink_init_regions - Initialize devlink regions
* @pf: the PF device structure
*
* Create devlink regions used to enable access to dump the contents of the
* flash memory on the device.
*/
void ice_devlink_init_regions(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
struct device *dev = ice_pf_to_dev(pf);
u64 nvm_size;
nvm_size = pf->hw.nvm.flash_size;
pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
nvm_size);
if (IS_ERR(pf->nvm_region)) {
dev_err(dev, "failed to create NVM devlink region, err %ld\n",
PTR_ERR(pf->nvm_region));
pf->nvm_region = NULL;
}
}
/**
* ice_devlink_destroy_regions - Destroy devlink regions
* @pf: the PF device structure
*
* Remove previously created regions for this PF.
*/
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
if (pf->nvm_region)
devlink_region_destroy(pf->nvm_region);
}
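Taken together, the new ice_devlink.c hooks the PF into three devlink facilities: "devlink dev info" reports the fixed/running version table registered above, each PF gets a physical devlink_port, and the nvm-flash region lets userspace trigger and read flash snapshots through the devlink region interface (highlight 21). The ops-based region pattern generalizes to any driver; a minimal sketch with hypothetical foo_ names:

static int foo_region_snapshot(struct devlink *devlink,
			       struct netlink_ext_ack *extack, u8 **data)
{
	u8 *buf = vzalloc(FOO_REGION_SIZE);	/* size is illustrative */

	if (!buf)
		return -ENOMEM;
	/* ... fill buf from the device ... */
	*data = buf;	/* devlink frees it later via ->destructor */
	return 0;
}

static const struct devlink_region_ops foo_region_ops = {
	.name = "foo-region",
	.destructor = vfree,
	.snapshot = foo_region_snapshot,
};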


@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */
#ifndef _ICE_DEVLINK_H_
#define _ICE_DEVLINK_H_
struct ice_pf *ice_allocate_pf(struct device *dev);
int ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
int ice_devlink_create_port(struct ice_pf *pf);
void ice_devlink_destroy_port(struct ice_pf *pf);
void ice_devlink_init_regions(struct ice_pf *pf);
void ice_devlink_destroy_regions(struct ice_pf *pf);
#endif /* _ICE_DEVLINK_H_ */


@@ -157,6 +157,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
@@ -166,25 +167,26 @@ static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
u8 oem_ver, oem_patch, nvm_ver_hi, nvm_ver_lo;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u16 oem_build;
struct ice_orom_info *orom;
struct ice_nvm_info *nvm;
strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
nvm = &hw->nvm;
orom = &nvm->orom;
strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
strscpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
/* Display NVM version (from which the firmware version can be
* determined) which contains more pertinent information.
*/
ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
&nvm_ver_hi, &nvm_ver_lo);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%x.%02x 0x%x %d.%d.%d", nvm_ver_hi, nvm_ver_lo,
hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
"%x.%02x 0x%x %d.%d.%d", nvm->major_ver, nvm->minor_ver,
nvm->eetrack, orom->major, orom->build, orom->patch);
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
@@ -243,7 +245,7 @@ static int ice_get_eeprom_len(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
return (int)(pf->hw.nvm.sr_words * sizeof(u16));
return (int)pf->hw.nvm.flash_size;
}
static int
@@ -251,39 +253,46 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
u8 *bytes)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
u16 first_word, last_word, nwords;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
struct device *dev;
int ret = 0;
u16 *buf;
u8 *buf;
dev = ice_pf_to_dev(pf);
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
netdev_dbg(netdev, "GEEPROM cmd 0x%08x, offset 0x%08x, len 0x%08x\n",
eeprom->cmd, eeprom->offset, eeprom->len);
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
nwords = last_word - first_word + 1;
buf = devm_kcalloc(dev, nwords, sizeof(u16), GFP_KERNEL);
buf = kzalloc(eeprom->len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
status = ice_read_sr_buf(hw, first_word, &nwords, buf);
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status) {
dev_err(dev, "ice_read_sr_buf failed, err %d aq_err %d\n",
dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
eeprom->len = sizeof(u16) * nwords;
ret = -EIO;
goto out;
}
memcpy(bytes, (u8 *)buf + (eeprom->offset & 1), eeprom->len);
status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
false);
if (status) {
dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto release;
}
memcpy(bytes, buf, eeprom->len);
release:
ice_release_nvm(hw);
out:
devm_kfree(dev, buf);
kfree(buf);
return ret;
}
@@ -462,7 +471,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
if (status)
goto err_setup_rx_ring;
status = ice_vsi_start_rx_rings(vsi);
status = ice_vsi_start_all_rx_rings(vsi);
if (status)
goto err_start_rx_ring;
@@ -494,7 +503,7 @@ static int ice_lbtest_disable_rings(struct ice_vsi *vsi)
netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, status);
status = ice_vsi_stop_rx_rings(vsi);
status = ice_vsi_stop_all_rx_rings(vsi);
if (status)
netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
vsi->vsi_num, status);
@@ -672,7 +681,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
if (!test_vsi) {
netdev_err(netdev, "Failed to create a VSI for the loopback test");
netdev_err(netdev, "Failed to create a VSI for the loopback test\n");
return 1;
}
@@ -731,7 +740,7 @@ lbtest_free_frame:
devm_kfree(dev, tx_frame);
remove_mac_filters:
if (ice_remove_mac(&pf->hw, &tmp_list))
netdev_err(netdev, "Could not remove MAC filter for the test VSI");
netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
free_mac_list:
ice_free_fltr_list(dev, &tmp_list);
lbtest_mac_dis:
@@ -744,7 +753,7 @@ lbtest_rings_dis:
lbtest_vsi_close:
test_vsi->netdev = NULL;
if (ice_vsi_release(test_vsi))
netdev_err(netdev, "Failed to remove the test VSI");
netdev_err(netdev, "Failed to remove the test VSI\n");
return ret;
}
@@ -834,7 +843,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
int status = ice_open(netdev);
if (status) {
dev_err(dev, "Could not open device %s, err %d",
dev_err(dev, "Could not open device %s, err %d\n",
pf->int_name, status);
}
}
@@ -1091,7 +1100,6 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
fecparam->active_fec = ETHTOOL_FEC_BASER;
break;
case ICE_AQ_LINK_25G_RS_528_FEC_EN:
/* fall through */
case ICE_AQ_LINK_25G_RS_544_FEC_EN:
fecparam->active_fec = ETHTOOL_FEC_RS;
break;
@@ -1131,6 +1139,33 @@ done:
return err;
}
/**
* ice_nway_reset - restart autonegotiation
* @netdev: network interface device structure
*/
static int ice_nway_reset(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_port_info *pi;
enum ice_status status;
pi = vsi->port_info;
/* If VSI state is up, then restart autoneg with link up */
if (!test_bit(__ICE_DOWN, vsi->back->state))
status = ice_aq_set_link_restart_an(pi, true, NULL);
else
status = ice_aq_set_link_restart_an(pi, false, NULL);
if (status) {
netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
status, pi->hw->adminq.sq_last_status);
return -EIO;
}
return 0;
}
/**
* ice_get_priv_flags - report device private flags
* @netdev: network interface device structure
@@ -1264,6 +1299,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
status = ice_cfg_lldp_mib_change(&pf->hw, true);
if (status)
dev_dbg(dev, "Fail to enable MIB change events\n");
ice_nway_reset(netdev);
}
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
@@ -1781,7 +1818,6 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
Asym_Pause);
break;
case ICE_FC_PFC:
/* fall through */
default:
ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause);
ethtool_link_ksettings_del_link_mode(ks, lp_advertising,
@@ -2776,30 +2812,6 @@ done:
return err;
}
static int ice_nway_reset(struct net_device *netdev)
{
/* restart autonegotiation */
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_port_info *pi;
enum ice_status status;
pi = vsi->port_info;
/* If VSI state is up, then restart autoneg with link up */
if (!test_bit(__ICE_DOWN, vsi->back->state))
status = ice_aq_set_link_restart_an(pi, true, NULL);
else
status = ice_aq_set_link_restart_an(pi, false, NULL);
if (status) {
netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
status, pi->hw->adminq.sq_last_status);
return -EIO;
}
return 0;
}
/**
* ice_get_pauseparam - Get Flow Control status
* @netdev: network interface device structure
@@ -3453,12 +3465,6 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
case ICE_TX_CONTAINER:
if (ec->tx_coalesce_usecs_high) {
netdev_info(vsi->netdev, "setting %s-usecs-high is not supported\n",
c_type_str);
return -EINVAL;
}
use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
coalesce_usecs = ec->tx_coalesce_usecs;
@@ -3534,53 +3540,6 @@ ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
return 0;
}
/**
* ice_is_coalesce_param_invalid - check for unsupported coalesce parameters
* @netdev: pointer to the netdev associated with this query
* @ec: ethtool coalesce settings to check against the driver's support
*
* Print a netdev message and return an error if the driver does not
* support one of the parameters. When a parameter is implemented,
* remove only that parameter from the param array.
*/
static int
ice_is_coalesce_param_invalid(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct ice_ethtool_not_used {
u32 value;
const char *name;
} param[] = {
{ec->stats_block_coalesce_usecs, "stats-block-usecs"},
{ec->rate_sample_interval, "sample-interval"},
{ec->pkt_rate_low, "pkt-rate-low"},
{ec->pkt_rate_high, "pkt-rate-high"},
{ec->rx_max_coalesced_frames, "rx-frames"},
{ec->rx_coalesce_usecs_irq, "rx-usecs-irq"},
{ec->rx_max_coalesced_frames_irq, "rx-frames-irq"},
{ec->tx_max_coalesced_frames, "tx-frames"},
{ec->tx_coalesce_usecs_irq, "tx-usecs-irq"},
{ec->tx_max_coalesced_frames_irq, "tx-frames-irq"},
{ec->rx_coalesce_usecs_low, "rx-usecs-low"},
{ec->rx_max_coalesced_frames_low, "rx-frames-low"},
{ec->tx_coalesce_usecs_low, "tx-usecs-low"},
{ec->tx_max_coalesced_frames_low, "tx-frames-low"},
{ec->rx_max_coalesced_frames_high, "rx-frames-high"},
{ec->tx_max_coalesced_frames_high, "tx-frames-high"}
};
int i;
for (i = 0; i < ARRAY_SIZE(param); i++) {
if (param[i].value) {
netdev_info(netdev, "Setting %s not supported\n",
param[i].name);
return -EINVAL;
}
}
return 0;
}
/**
* ice_print_if_odd_usecs - print message if user tries to set odd [tx|rx]-usecs
* @netdev: netdev used for print
@@ -3621,9 +3580,6 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
if (ice_is_coalesce_param_invalid(netdev, ec))
return -EINVAL;
if (q_num < 0) {
struct ice_q_vector *q_vector = vsi->q_vectors[0];
int v_idx;
@@ -3818,6 +3774,9 @@ ice_get_module_eeprom(struct net_device *netdev,
}
static const struct ethtool_ops ice_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
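/* Editor's note: a minimal sketch (not from this patch) of the core-side
 * validation that supported_coalesce_params enables, replacing the removed
 * ice_is_coalesce_param_invalid(). coalesce_fields_set() is a hypothetical
 * helper returning a bitmap of the nonzero fields in @ec:
 *
 *	u32 set = coalesce_fields_set(ec);
 *
 *	if (set & ~ops->supported_coalesce_params)
 *		return -EOPNOTSUPP;
 */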
@@ -3867,6 +3826,7 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
.get_regs = ice_get_regs,
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
.get_link = ethtool_op_get_link,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
.get_strings = ice_get_strings,

@@ -3470,6 +3470,24 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
return 0;
}
/**
* ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
* @hw: pointer to the HW struct
* @idx: the index of the TCAM entry to remove
* @chg: the list of change structures to search
*/
static void
ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
{
struct ice_chs_chg *pos, *tmp;
list_for_each_entry_safe(tmp, pos, chg, list_entry)
if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
list_del(&tmp->list_entry);
devm_kfree(ice_hw_to_dev(hw), tmp);
}
}
/**
* ice_prof_tcam_ena_dis - add enable or disable TCAM change
* @hw: pointer to the HW struct
@@ -3489,14 +3507,19 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
enum ice_status status;
struct ice_chs_chg *p;
/* Default: enable means change the low flag bit to don't care */
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
/* if disabling, free the TCAM */
if (!enable) {
status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx);
status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
/* If we have already created a change for this TCAM entry, then
* we need to remove that entry, in order to avoid writing to a
* TCAM entry we will no longer own.
*/
ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
tcam->tcam_idx = 0;
tcam->in_use = 0;
return status;
@@ -3612,11 +3635,12 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @blk: hardware block
* @vsig: the VSIG to which this profile is to be added
* @hdl: the profile handle indicating the profile to add
* @rev: true to add entries to the end of the list
* @chg: the change list
*/
static enum ice_status
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
struct list_head *chg)
bool rev, struct list_head *chg)
{
/* Masks that ignore flags */
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -3625,7 +3649,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
u16 i;
u16 vsig_idx, i;
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
@@ -3687,8 +3711,13 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
}
/* add profile to VSIG */
list_add(&t->list,
&hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);
vsig_idx = vsig & ICE_VSIG_IDX_M;
if (rev)
list_add_tail(&t->list,
&hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
else
list_add(&t->list,
&hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
return 0;
@@ -3728,7 +3757,7 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
if (status)
goto err_ice_create_prof_id_vsig;
status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
if (status)
goto err_ice_create_prof_id_vsig;
@@ -3753,11 +3782,13 @@ err_ice_create_prof_id_vsig:
* @blk: hardware block
* @vsi: the initial VSI that will be in VSIG
* @lst: the list of profile that will be added to the VSIG
* @new_vsig: return of new VSIG
* @chg: the change list
*/
static enum ice_status
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
struct list_head *lst, struct list_head *chg)
struct list_head *lst, u16 *new_vsig,
struct list_head *chg)
{
struct ice_vsig_prof *t;
enum ice_status status;
@@ -3772,12 +3803,15 @@ ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
return status;
list_for_each_entry(t, lst, list) {
/* Reverse the order here since we are copying the list */
status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
chg);
true, chg);
if (status)
return status;
}
*new_vsig = vsig;
return 0;
}
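/* Editor's note: a sketch of why the copy loop above passes rev=true.
 * list_add() prepends, so copying a source list A->B->C with it would
 * build C->B->A; list_add_tail() appends and preserves A->B->C.
 * copy_of() below is a hypothetical per-node duplicator:
 *
 *	list_for_each_entry(t, src, list)
 *		list_add(&copy_of(t)->list, dst);	// dst ends up C, B, A
 *
 *	list_for_each_entry(t, src, list)
 *		list_add_tail(&copy_of(t)->list, dst);	// dst stays A, B, C
 */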
@@ -3899,7 +3933,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* not sharing entries and we can simply add the new
* profile to the VSIG.
*/
status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
&chg);
if (status)
goto err_ice_add_prof_id_flow;
@@ -3910,7 +3945,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
} else {
/* No match, so we need a new VSIG */
status = ice_create_vsig_from_lst(hw, blk, vsi,
&union_lst, &chg);
&union_lst, &vsig,
&chg);
if (status)
goto err_ice_add_prof_id_flow;
@@ -4076,7 +4112,8 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* new VSIG and TCAM entries
*/
status = ice_create_vsig_from_lst(hw, blk, vsi,
&copy, &chg);
&copy, &vsig,
&chg);
if (status)
goto err_ice_rem_prof_id_flow;

@@ -694,7 +694,7 @@ out:
* ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
* @seg: packet segment the field being set belongs to
* @fld: field to be set
* @type: type of the field
* @field_type: type of the field
* @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
* entry's input buffer
* @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
@@ -715,16 +715,16 @@ out:
*/
static void
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
enum ice_flow_fld_match_type type, u16 val_loc,
enum ice_flow_fld_match_type field_type, u16 val_loc,
u16 mask_loc, u16 last_loc)
{
u64 bit = BIT_ULL(fld);
seg->match |= bit;
if (type == ICE_FLOW_FLD_TYPE_RANGE)
if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
seg->range |= bit;
seg->fields[fld].type = type;
seg->fields[fld].type = field_type;
seg->fields[fld].src.val = val_loc;
seg->fields[fld].src.mask = mask_loc;
seg->fields[fld].src.last = last_loc;

@@ -85,6 +85,7 @@
#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, 0)
#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8)
#define QRXFLXP_CNTXT_TS_M BIT(11)
#define GLGEN_RSTAT 0x000B8188
#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0)
#define GLGEN_RSTCTL 0x000B8180
@@ -217,6 +218,8 @@
#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
#define GL_MDCK_TX_TDPU 0x00049348
#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
#define GL_MDET_RX 0x00294C00
#define GL_MDET_RX_QNUM_S 0
#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0)
@@ -286,6 +289,8 @@
#define GL_PWR_MODE_CTL 0x000B820C
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
#define GLDCB_RTCTQ_RXQNUM_S 0
#define GLDCB_RTCTQ_RXQNUM_M ICE_M(0x7FF, 0)
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))

@@ -9,11 +9,11 @@
/**
* ice_vsi_type_str - maps VSI type enum to string equivalents
* @type: VSI type enum
* @vsi_type: VSI type enum
*/
const char *ice_vsi_type_str(enum ice_vsi_type type)
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
{
switch (type) {
switch (vsi_type) {
case ICE_VSI_PF:
return "ICE_VSI_PF";
case ICE_VSI_VF:
@@ -26,16 +26,26 @@ const char *ice_vsi_type_str(enum ice_vsi_type type)
}
/**
* ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
* ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
* @vsi: the VSI being configured
* @ena: start or stop the Rx rings
*
* First enable/disable all of the Rx rings and flush any remaining writes,
* then verify that the rings have all been enabled/disabled successfully.
* This lets all of the register writes complete up front, before any time
* is spent waiting on the hardware state change for each ring.
*/
static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
int i, ret = 0;
for (i = 0; i < vsi->num_rxq; i++)
ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
ice_flush(&vsi->back->hw);
for (i = 0; i < vsi->num_rxq; i++) {
ret = ice_vsi_ctrl_rx_ring(vsi, ena, i);
ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
if (ret)
break;
}
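/* Editor's note: a minimal sketch of the two-phase pattern used above,
 * with hypothetical helpers. Kicking every ring first and flushing once
 * lets the posted register writes land in parallel, so the per-ring
 * waits largely overlap instead of serializing N write+poll round trips:
 *
 *	for (i = 0; i < n; i++)
 *		kick_ring(i);		// posted write, returns immediately
 *	flush_writes();			// single flush for all rings
 *	for (i = 0; i < n; i++)
 *		if (wait_ring_state(i))	// poll each ring for the new state
 *			return -EBUSY;
 */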
@@ -111,7 +121,6 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
/* fall through */
case ICE_VSI_LB:
vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
@@ -169,12 +178,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vf = &pf->vf[vsi->vf_id];
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_vf_msix includes (VF miscellaneous vector +
/* pf->num_msix_per_vf includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is the number
* of queue vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
* original vector count
*/
vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
break;
case ICE_VSI_LB:
vsi->alloc_txq = 1;
@@ -341,13 +350,13 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
/**
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @type: type of VSI
* @vsi_type: type of VSI
* @vf_id: ID of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
@@ -368,13 +377,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
if (!vsi)
goto unlock_pf;
vsi->type = type;
vsi->type = vsi_type;
vsi->back = pf;
set_bit(__ICE_DOWN, vsi->state);
vsi->idx = pf->next_vsi;
if (type == ICE_VSI_VF)
if (vsi_type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
else
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
@@ -433,7 +442,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
.scatter_count = ICE_MAX_SCATTER_TXQS,
.vsi_map = vsi->txq_map,
.vsi_map_offset = 0,
.mapping_mode = vsi->tx_mapping_mode
.mapping_mode = ICE_VSI_MAP_CONTIG
};
struct ice_qs_cfg rx_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
@@ -443,18 +452,21 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
.scatter_count = ICE_MAX_SCATTER_RXQS,
.vsi_map = vsi->rxq_map,
.vsi_map_offset = 0,
.mapping_mode = vsi->rx_mapping_mode
.mapping_mode = ICE_VSI_MAP_CONTIG
};
int ret = 0;
vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
int ret;
ret = __ice_vsi_get_qs(&tx_qs_cfg);
if (!ret)
ret = __ice_vsi_get_qs(&rx_qs_cfg);
if (ret)
return ret;
vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
return ret;
ret = __ice_vsi_get_qs(&rx_qs_cfg);
if (ret)
return ret;
vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
return 0;
}
/**
@@ -559,12 +571,11 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
break;
case ICE_VSI_VF:
/* VF VSI will gets a small RSS table
* For VSI_LUT, LUT size should be set to 64 bytes
/* VF VSI will get a small RSS table.
* For VSI_LUT, LUT size should be set to 64 bytes.
*/
vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
vsi->rss_size = min_t(int, num_online_cpus(),
BIT(cap->rss_table_entry_width));
vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
break;
case ICE_VSI_LB:
@@ -672,7 +683,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
if (vsi->type == ICE_VSI_PF)
max_rss = ICE_MAX_LG_RSS_QS;
else
max_rss = ICE_MAX_SMALL_RSS_QS;
max_rss = ICE_MAX_RSS_QS_PER_VF;
qcount_rx = min_t(int, rx_numq_tc, max_rss);
if (!vsi->req_rxq)
qcount_rx = min_t(int, qcount_rx,
@@ -804,7 +815,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
ctxt->info = vsi->info;
switch (vsi->type) {
case ICE_VSI_LB:
/* fall through */
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
@@ -896,6 +906,109 @@ out:
return ret;
}
/**
* ice_free_res - free a block of resources
* @res: pointer to the resource
* @index: starting index previously returned by ice_get_res
* @id: identifier to track owner
*
* Returns number of resources freed
*/
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
{
int count = 0;
int i;
if (!res || index >= res->end)
return -EINVAL;
id |= ICE_RES_VALID_BIT;
for (i = index; i < res->end && res->list[i] == id; i++) {
res->list[i] = 0;
count++;
}
return count;
}
/**
* ice_search_res - Search the tracker for a block of resources
* @res: pointer to the resource
* @needed: size of the block needed
* @id: identifier to track owner
*
* Returns the base item index of the block, or -ENOMEM for error
*/
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
int start = 0, end = 0;
if (needed > res->end)
return -ENOMEM;
id |= ICE_RES_VALID_BIT;
do {
/* skip already allocated entries */
if (res->list[end++] & ICE_RES_VALID_BIT) {
start = end;
if ((start + needed) > res->end)
break;
}
if (end == (start + needed)) {
int i = start;
/* there was enough, so assign it to the requestor */
while (i != end)
res->list[i++] = id;
return start;
}
} while (end < res->end);
return -ENOMEM;
}
/**
* ice_get_free_res_count - Get free count from a resource tracker
* @res: Resource tracker instance
*/
static u16 ice_get_free_res_count(struct ice_res_tracker *res)
{
u16 i, count = 0;
for (i = 0; i < res->end; i++)
if (!(res->list[i] & ICE_RES_VALID_BIT))
count++;
return count;
}
/**
* ice_get_res - get a block of resources
* @pf: board private structure
* @res: pointer to the resource
* @needed: size of the block needed
* @id: identifier to track owner
*
* Returns the base item index of the block, or negative for error
*/
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
if (!res || !pf)
return -EINVAL;
if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
needed, res->num_entries, id);
return -EINVAL;
}
return ice_search_res(res, needed, id);
}
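/* Editor's note: a hedged usage sketch of the tracker API above,
 * mirroring how ice_vsi_setup_vector_base() uses it below. A caller
 * reserves a contiguous block tagged with its own ID and later frees
 * it by base index (values hypothetical):
 *
 *	int base = ice_get_res(pf, pf->irq_tracker, 4, vsi->idx);
 *
 *	if (base < 0)
 *		return base;	// -EINVAL on bad params, -ENOMEM if full
 *	...
 *	ice_free_res(pf->irq_tracker, base, vsi->idx);
 */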
/**
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
@@ -928,8 +1041,9 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
if (vsi->base_vector < 0) {
dev_err(dev, "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
num_q_vectors, vsi->vsi_num, vsi->base_vector);
dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
ice_get_free_res_count(pf->irq_tracker),
ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
return -ENOENT;
}
pf->num_avail_sw_msix -= num_q_vectors;
@@ -1348,7 +1462,9 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
list_add(&tmp->list_entry, &tmp_add_list);
status = ice_add_vlan(&pf->hw, &tmp_add_list);
if (status) {
if (!status) {
vsi->num_vlan++;
} else {
err = -ENODEV;
dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
vsi->vsi_num);
@@ -1390,10 +1506,12 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
list_add(&list->list_entry, &tmp_add_list);
status = ice_remove_vlan(&pf->hw, &tmp_add_list);
if (status == ICE_ERR_DOES_NOT_EXIST) {
if (!status) {
vsi->num_vlan--;
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
vid, vsi->vsi_num, status);
} else if (status) {
} else {
dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
vid, vsi->vsi_num, status);
err = -EIO;
@@ -1678,25 +1796,25 @@ out:
}
/**
* ice_vsi_start_rx_rings - start VSI's Rx rings
* @vsi: the VSI whose rings are to be started
* ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
* @vsi: the VSI whose rings are to be enabled
*
* Returns 0 on success and a negative value on error
*/
int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
{
return ice_vsi_ctrl_rx_rings(vsi, true);
return ice_vsi_ctrl_all_rx_rings(vsi, true);
}
/**
* ice_vsi_stop_rx_rings - stop VSI's Rx rings
* @vsi: the VSI
* ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
* @vsi: the VSI whose rings are to be disabled
*
* Returns 0 on success and a negative value on error
*/
int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
{
return ice_vsi_ctrl_rx_rings(vsi, false);
return ice_vsi_ctrl_all_rx_rings(vsi, false);
}
/**
@@ -1755,6 +1873,20 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
}
/**
* ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
* @vsi: VSI to check whether or not VLAN pruning is enabled.
*
* returns true if Rx VLAN pruning is enabled and false otherwise.
*/
bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
{
if (!vsi)
return false;
return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
}
/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
@@ -1952,7 +2084,7 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @pi: pointer to the port_info instance
* @type: VSI type
* @vsi_type: VSI type
* @vf_id: defines VF ID to which this VSI connects. This field is meant to be
* used only for ICE_VSI_VF VSI type. For other VSI types, should
* fill-in ICE_INVAL_VFID as input.
@@ -1964,7 +2096,7 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
*/
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id)
enum ice_vsi_type vsi_type, u16 vf_id)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
@@ -1972,10 +2104,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
if (type == ICE_VSI_VF)
vsi = ice_vsi_alloc(pf, type, vf_id);
if (vsi_type == ICE_VSI_VF)
vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
else
vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
@@ -2025,6 +2157,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
/* Always add VLAN ID 0 switch rule by default. This is needed
* in order to allow all untagged and 0 tagged priority traffic
* if Rx VLAN pruning is enabled. There are also cases where we
* don't get the call to add VLAN 0 via ice_vlan_rx_add_vid(),
* so this handles those cases (e.g. adding the PF to a bridge
* without the 8021q module loaded).
*/
ret = ice_vsi_add_vlan(vsi, 0);
if (ret)
goto unroll_clear_rings;
ice_vsi_map_rings_to_vectors(vsi);
/* Do not exit if configuring RSS had an issue, at least
@@ -2104,6 +2247,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
return vsi;
unroll_clear_rings:
ice_vsi_clear_rings(vsi);
unroll_vector_base:
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
@@ -2298,94 +2443,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
}
}
/**
* ice_free_res - free a block of resources
* @res: pointer to the resource
* @index: starting index previously returned by ice_get_res
* @id: identifier to track owner
*
* Returns number of resources freed
*/
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
{
int count = 0;
int i;
if (!res || index >= res->end)
return -EINVAL;
id |= ICE_RES_VALID_BIT;
for (i = index; i < res->end && res->list[i] == id; i++) {
res->list[i] = 0;
count++;
}
return count;
}
/**
* ice_search_res - Search the tracker for a block of resources
* @res: pointer to the resource
* @needed: size of the block needed
* @id: identifier to track owner
*
* Returns the base item index of the block, or -ENOMEM for error
*/
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
int start = 0, end = 0;
if (needed > res->end)
return -ENOMEM;
id |= ICE_RES_VALID_BIT;
do {
/* skip already allocated entries */
if (res->list[end++] & ICE_RES_VALID_BIT) {
start = end;
if ((start + needed) > res->end)
break;
}
if (end == (start + needed)) {
int i = start;
/* there was enough, so assign it to the requestor */
while (i != end)
res->list[i++] = id;
return start;
}
} while (end < res->end);
return -ENOMEM;
}
/**
* ice_get_res - get a block of resources
* @pf: board private structure
* @res: pointer to the resource
* @needed: size of the block needed
* @id: identifier to track owner
*
* Returns the base item index of the block, or negative for error
*/
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
if (!res || !pf)
return -EINVAL;
if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
needed, res->num_entries, id);
return -EINVAL;
}
return ice_search_res(res, needed, id);
}
/**
* ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
* @vsi: the VSI being un-configured

@@ -6,7 +6,7 @@
#include "ice.h"
const char *ice_vsi_type_str(enum ice_vsi_type type);
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
int
ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
@@ -30,9 +30,9 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi);
int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena);
int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi);
int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
@@ -42,6 +42,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
@@ -56,7 +58,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id);
enum ice_vsi_type vsi_type, u16 vf_id);
void ice_napi_del(struct ice_vsi *vsi);

@@ -10,6 +10,7 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 8
@@ -706,7 +707,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
/* Get FEC mode based on negotiated link info */
switch (vsi->port_info->phy.link_info.fec_info) {
case ICE_AQ_LINK_25G_RS_528_FEC_EN:
/* fall through */
case ICE_AQ_LINK_25G_RS_544_FEC_EN:
fec = "RS-FEC";
break;
@@ -1029,6 +1029,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
if (ice_handle_link_event(pf, &event))
dev_err(dev, "Could not handle link event\n");
break;
case ice_aqc_opc_event_lan_overflow:
ice_vf_lan_overflow_event(pf, &event);
break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
break;
@@ -1185,20 +1188,28 @@ static void ice_service_timer(struct timer_list *t)
* ice_handle_mdd_event - handle malicious driver detect event
* @pf: pointer to the PF structure
*
* Called from service task. OICR interrupt handler indicates MDD event
* Called from service task. OICR interrupt handler indicates MDD event.
* VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
* messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
* disable the queue, the PF can be configured to reset the VF using ethtool
* private flag mdd-auto-reset-vf.
*/
static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
bool mdd_detected = false;
u32 reg;
int i;
if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
/* Since the VF MDD event logging is rate limited, check if
* there are pending MDD events.
*/
ice_print_vfs_mdd_events(pf);
return;
}
/* find what triggered the MDD event */
/* find what triggered an MDD event */
reg = rd32(hw, GL_MDET_TX_PQM);
if (reg & GL_MDET_TX_PQM_VALID_M) {
u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
@@ -1214,7 +1225,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
mdd_detected = true;
}
reg = rd32(hw, GL_MDET_TX_TCLAN);
@@ -1232,7 +1242,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
mdd_detected = true;
}
reg = rd32(hw, GL_MDET_RX);
@@ -1250,85 +1259,85 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_RX, 0xffffffff);
mdd_detected = true;
}
if (mdd_detected) {
bool pf_mdd_detected = false;
reg = rd32(hw, PF_MDET_TX_PQM);
if (reg & PF_MDET_TX_PQM_VALID_M) {
wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
dev_info(dev, "TX driver issue detected, PF reset issued\n");
pf_mdd_detected = true;
}
reg = rd32(hw, PF_MDET_TX_TCLAN);
if (reg & PF_MDET_TX_TCLAN_VALID_M) {
wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
dev_info(dev, "TX driver issue detected, PF reset issued\n");
pf_mdd_detected = true;
}
reg = rd32(hw, PF_MDET_RX);
if (reg & PF_MDET_RX_VALID_M) {
wr32(hw, PF_MDET_RX, 0xFFFF);
dev_info(dev, "RX driver issue detected, PF reset issued\n");
pf_mdd_detected = true;
}
/* The queue belongs to the PF, so initiate a PF reset */
if (pf_mdd_detected) {
set_bit(__ICE_NEEDS_RESTART, pf->state);
ice_service_task_schedule(pf);
}
/* check to see if this PF caused an MDD event */
reg = rd32(hw, PF_MDET_TX_PQM);
if (reg & PF_MDET_TX_PQM_VALID_M) {
wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
}
reg = rd32(hw, PF_MDET_TX_TCLAN);
if (reg & PF_MDET_TX_TCLAN_VALID_M) {
wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
}
reg = rd32(hw, PF_MDET_RX);
if (reg & PF_MDET_RX_VALID_M) {
wr32(hw, PF_MDET_RX, 0xFFFF);
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
}
/* check to see if one of the VFs caused the MDD */
/* Check to see if one of the VFs caused an MDD event, and then
* increment counters and set print pending
*/
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
bool vf_mdd_detected = false;
reg = rd32(hw, VP_MDET_TX_PQM(i));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
vf_mdd_detected = true;
dev_info(dev, "TX driver issue detected on VF %d\n",
i);
vf->mdd_tx_events.count++;
set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
i);
}
reg = rd32(hw, VP_MDET_TX_TCLAN(i));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
vf_mdd_detected = true;
dev_info(dev, "TX driver issue detected on VF %d\n",
i);
vf->mdd_tx_events.count++;
set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
i);
}
reg = rd32(hw, VP_MDET_TX_TDPU(i));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
vf_mdd_detected = true;
dev_info(dev, "TX driver issue detected on VF %d\n",
i);
vf->mdd_tx_events.count++;
set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
i);
}
reg = rd32(hw, VP_MDET_RX(i));
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
vf_mdd_detected = true;
dev_info(dev, "RX driver issue detected on VF %d\n",
i);
}
vf->mdd_rx_events.count++;
set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
i);
if (vf_mdd_detected) {
vf->num_mdd_events++;
if (vf->num_mdd_events &&
vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
i, vf->num_mdd_events);
/* Since the queue is disabled on VF Rx MDD events, the
* PF can be configured to reset the VF through ethtool
* private flag mdd-auto-reset-vf.
*/
if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
ice_reset_vf(&pf->vf[i], false);
}
}
ice_print_vfs_mdd_events(pf);
}
/**
@@ -1510,7 +1519,7 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN;
hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
@@ -1916,8 +1925,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
ret = ice_down(vsi);
if (ret) {
NL_SET_ERR_MSG_MOD(extack,
"Preparing device for XDP attach failed");
NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
return ret;
}
}
@@ -1926,13 +1934,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
vsi->num_xdp_txq = vsi->alloc_txq;
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack,
"Setting up XDP Tx resources failed");
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack,
"Freeing XDP Tx resources failed");
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
} else {
ice_vsi_assign_bpf_prog(vsi, prog);
}
@@ -1965,8 +1971,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct ice_vsi *vsi = np->vsi;
if (vsi->type != ICE_VSI_PF) {
NL_SET_ERR_MSG_MOD(xdp->extack,
"XDP can be loaded only on PF VSI");
NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
return -EINVAL;
}
@@ -1993,6 +1998,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
u32 val;
/* Disable anti-spoof detection interrupt to prevent spurious event
* interrupts during a function reset. Anti-spoof functionality is
* still supported.
*/
val = rd32(hw, GL_MDCK_TX_TDPU);
val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
wr32(hw, GL_MDCK_TX_TDPU, val);
/* clear things first */
wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
rd32(hw, PFINT_OICR); /* read to clear */
@@ -2042,8 +2055,16 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
}
if (oicr & PFINT_OICR_VFLR_M) {
ena_mask &= ~PFINT_OICR_VFLR_M;
set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
/* disable any further VFLR event notifications */
if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
u32 reg = rd32(hw, PFINT_OICR_ENA);
reg &= ~PFINT_OICR_VFLR_M;
wr32(hw, PFINT_OICR_ENA, reg);
} else {
ena_mask &= ~PFINT_OICR_VFLR_M;
set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
}
}
if (oicr & PFINT_OICR_GRST_M) {
@@ -2351,10 +2372,16 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
u8 mac_addr[ETH_ALEN];
int err;
err = ice_devlink_create_port(pf);
if (err)
return err;
netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
vsi->alloc_rxq);
if (!netdev)
return -ENOMEM;
if (!netdev) {
err = -ENOMEM;
goto err_destroy_devlink_port;
}
vsi->netdev = netdev;
np = netdev_priv(netdev);
@@ -2384,7 +2411,9 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
err = register_netdev(vsi->netdev);
if (err)
return err;
goto err_destroy_devlink_port;
devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
netif_carrier_off(vsi->netdev);
@@ -2392,6 +2421,11 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
netif_tx_stop_all_queues(vsi->netdev);
return 0;
err_destroy_devlink_port:
ice_devlink_destroy_port(pf);
return err;
}
/**
@@ -2461,16 +2495,19 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
if (vsi->info.pvid)
return -EINVAL;
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid)) {
/* VLAN 0 is added by default during load/reset */
if (!vid)
return 0;
/* Enable VLAN pruning when a VLAN other than 0 is added */
if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
ret = ice_cfg_vlan_pruning(vsi, true, false);
if (ret)
return ret;
}
/* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
* packets aren't pruned by the device's internal switch on Rx
*/
ret = ice_vsi_add_vlan(vsi, vid);
if (!ret) {
@@ -2500,6 +2537,10 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
if (vsi->info.pvid)
return -EINVAL;
/* don't allow removal of VLAN 0 */
if (!vid)
return 0;
/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
* information
*/
@@ -2507,8 +2548,8 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
if (ret)
return ret;
/* Disable VLAN pruning when VLAN 0 is removed */
if (unlikely(!vid))
/* Disable pruning when VLAN 0 is the only VLAN rule */
if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
ret = ice_cfg_vlan_pruning(vsi, false, false);
vsi->vlan_ena = false;
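/* Editor's note: the resulting pruning lifecycle, as I read the two
 * hooks above (a summary trace, not code from this patch):
 *
 *	VSI setup adds VID 0	-> num_vlan = 1, pruning stays off
 *	add VID 100		-> pruning enabled, num_vlan = 2
 *	kill VID 100		-> num_vlan = 1 (only VID 0), pruning off
 */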
@@ -2945,7 +2986,6 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
}
break;
case ICE_ERR_BUF_TOO_SHORT:
/* fall-through */
case ICE_ERR_CFG:
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
break;
@@ -2977,7 +3017,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
default:
break;
}
/* fall-through */
fallthrough;
default:
dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
*status);
@@ -3069,30 +3109,22 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
* followed by a EUI-64 identifier (PCIe Device Serial Number)
*/
struct pci_dev *pdev = pf->pdev;
char *opt_fw_filename = NULL;
u32 dword;
u8 dsn[8];
int pos;
char *opt_fw_filename;
u64 dsn;
/* Determine the name of the optional file using the DSN (two
* dwords following the start of the DSN Capability).
*/
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
if (pos) {
opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
if (!opt_fw_filename)
return NULL;
dsn = pci_get_dsn(pdev);
if (!dsn)
return NULL;
pci_read_config_dword(pdev, pos + 4, &dword);
put_unaligned_le32(dword, &dsn[0]);
pci_read_config_dword(pdev, pos + 8, &dword);
put_unaligned_le32(dword, &dsn[4]);
snprintf(opt_fw_filename, NAME_MAX,
"%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg",
ICE_DDP_PKG_PATH,
dsn[7], dsn[6], dsn[5], dsn[4],
dsn[3], dsn[2], dsn[1], dsn[0]);
}
opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
if (!opt_fw_filename)
return NULL;
snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llX.pkg",
ICE_DDP_PKG_PATH, dsn);
return opt_fw_filename;
}
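/* Editor's note: a worked example of the naming scheme above. For a
 * device serial number of 0x0123456789ABCDEF, pci_get_dsn() returns
 * that u64 and "%sice-%016llX.pkg" yields (with ICE_DDP_PKG_PATH
 * assumed to be the DDP firmware directory prefix):
 *
 *	<ICE_DDP_PKG_PATH>ice-0123456789ABCDEF.pkg
 */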
@@ -3166,7 +3198,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
return err;
}
pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL);
pf = ice_allocate_pf(dev);
if (!pf)
return -ENOMEM;
@@ -3204,6 +3236,12 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
err = ice_devlink_register(pf);
if (err) {
dev_err(dev, "ice_devlink_register failed: %d\n", err);
goto err_exit_unroll;
}
#ifndef CONFIG_DYNAMIC_DEBUG
if (debug < -1)
hw->debug_mask = debug;
@@ -3238,6 +3276,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_init_pf_unroll;
}
ice_devlink_init_regions(pf);
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
@@ -3336,6 +3376,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
return 0;
err_alloc_sw_unroll:
ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
set_bit(__ICE_DOWN, pf->state);
devm_kfree(dev, pf->first_sw);
@@ -3346,8 +3387,10 @@ err_init_interrupt_unroll:
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
ice_devlink_destroy_regions(pf);
ice_deinit_hw(hw);
err_exit_unroll:
ice_devlink_unregister(pf);
pci_disable_pcie_error_reporting(pdev);
return err;
}
@@ -3370,11 +3413,15 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(__ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
}
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
ice_free_vfs(pf);
ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
@@ -3383,7 +3430,10 @@ static void ice_remove(struct pci_dev *pdev)
ice_vsi_free_q_vectors(pf->vsi[i]);
}
ice_deinit_pf(pf);
ice_devlink_destroy_regions(pf);
ice_deinit_hw(&pf->hw);
ice_devlink_unregister(pf);
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
* and the service task is already stopped.
@@ -3534,15 +3584,26 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822X_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
/* required last entry */
{ 0, }
};
@@ -3961,7 +4022,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
* Tx queue group list was configured and the context bits were
* programmed using ice_vsi_cfg_txqs
*/
err = ice_vsi_start_rx_rings(vsi);
err = ice_vsi_start_all_rx_rings(vsi);
if (err)
return err;
@@ -4340,7 +4401,7 @@ int ice_down(struct ice_vsi *vsi)
vsi->vsi_num, tx_err);
}
rx_err = ice_vsi_stop_rx_rings(vsi);
rx_err = ice_vsi_stop_all_rx_rings(vsi);
if (rx_err)
netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
vsi->vsi_num, rx_err);
@@ -5027,6 +5088,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
/**
* ice_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
* @txqueue: Tx queue
*/
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -5064,13 +5126,13 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
/* Read interrupt register */
val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, txqueue, tx_ring->next_to_clean,
head, tx_ring->next_to_use, val);
}
pf->tx_timeout_last_recovery = jiffies;
netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
pf->tx_timeout_recovery_level, txqueue);
switch (pf->tx_timeout_recovery_level) {

@@ -11,25 +11,29 @@
* @length: length of the section to be read (in bytes from the offset)
* @data: command buffer (size [bytes] = length)
* @last_command: tells if this is the last command in a series
* @read_shadow_ram: tells if this is a shadow RAM read
* @cd: pointer to command details structure or NULL
*
* Read the NVM using the admin queue commands (0x0701)
*/
static enum ice_status
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, struct ice_sq_cd *cd)
void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
cmd = &desc.params.nvm;
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000)
if (offset > ICE_AQC_NVM_MAX_OFFSET)
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
@@ -42,65 +46,64 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
}
/**
* ice_check_sr_access_params - verify params for Shadow RAM R/W operations.
* @hw: pointer to the HW structure
* @offset: offset in words from module start
* @words: number of words to access
*/
static enum ice_status
ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
{
if ((offset + words) > hw->nvm.sr_words) {
ice_debug(hw, ICE_DBG_NVM,
"NVM error: offset beyond SR lmt.\n");
return ICE_ERR_PARAM;
}
if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) {
/* We can access only up to 4KB (one sector), in one AQ write */
ice_debug(hw, ICE_DBG_NVM,
"NVM error: tried to access %d words, limit is %d.\n",
words, ICE_SR_SECTOR_SIZE_IN_WORDS);
return ICE_ERR_PARAM;
}
if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) !=
(offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) {
/* A single access cannot spread over two sectors */
ice_debug(hw, ICE_DBG_NVM,
"NVM error: cannot spread over two sectors.\n");
return ICE_ERR_PARAM;
}
return 0;
}
/**
* ice_read_sr_aq - Read Shadow RAM.
* @hw: pointer to the HW structure
* @offset: offset in words from module start
* @words: number of words to read
* @data: buffer for words reads from Shadow RAM
* @last_command: tells the AdminQ that this is the last command
* ice_read_flat_nvm - Read portion of NVM by flat offset
* @hw: pointer to the HW struct
* @offset: offset from beginning of NVM
* @length: (in) number of bytes to read; (out) number of bytes actually read
* @data: buffer to return data in (sized to fit the specified length)
* @read_shadow_ram: if true, read from shadow RAM instead of NVM
*
* Reads 16-bit word buffers from the Shadow RAM using the admin command.
* Reads a portion of the NVM, as a flat memory space. This function correctly
* breaks read requests across Shadow RAM sectors and ensures that no single
* read request exceeds the maximum 4KB read for a single AdminQ command.
*
* Returns a status code on failure. Note that the data pointer may be
* partially updated if some reads succeed before a failure.
*/
static enum ice_status
ice_read_sr_aq(struct ice_hw *hw, u32 offset, u16 words, u16 *data,
bool last_command)
enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram)
{
enum ice_status status;
u32 inlen = *length;
u32 bytes_read = 0;
bool last_cmd;
status = ice_check_sr_access_params(hw, offset, words);
/* values in "offset" and "words" parameters are sized as words
* (16 bits) but ice_aq_read_nvm expects these values in bytes.
* So do this conversion while calling ice_aq_read_nvm.
*/
if (!status)
status = ice_aq_read_nvm(hw, 0, 2 * offset, 2 * words, data,
last_command, NULL);
*length = 0;
/* Verify the length of the read if this is for the Shadow RAM */
if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
ice_debug(hw, ICE_DBG_NVM,
"NVM error: requested offset is beyond Shadow RAM limit\n");
return ICE_ERR_PARAM;
}
do {
u32 read_size, sector_offset;
/* ice_aq_read_nvm cannot read more than 4KB at a time.
* Additionally, a read from the Shadow RAM may not cross over
* a sector boundary. Conveniently, the sector size is also
* 4KB.
*/
sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
inlen - bytes_read);
last_cmd = !(bytes_read + read_size < inlen);
status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
offset, read_size,
data + bytes_read, last_cmd,
read_shadow_ram, NULL);
if (status)
break;
bytes_read += read_size;
offset += read_size;
} while (!last_cmd);
*length = bytes_read;
return status;
}
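/* Editor's note: a worked example of the splitting above. With 4KB
 * (0x1000-byte) sectors, a 0x200-byte read at flat offset 0xF80 crosses
 * a sector boundary and is issued as two AQ reads:
 *
 *	sector_offset = 0xF80 % 0x1000 = 0xF80
 *	read #1: offset 0xF80,  size min(0x1000 - 0xF80, 0x200) = 0x80
 *	read #2: offset 0x1000, size 0x180, last_cmd = true
 */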
@@ -110,75 +113,25 @@ ice_read_sr_aq(struct ice_hw *hw, u32 offset, u16 words, u16 *data,
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_aq method.
* Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
*/
static enum ice_status
ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
u32 bytes = sizeof(u16);
enum ice_status status;
__le16 data_local;
status = ice_read_sr_aq(hw, offset, 1, data, true);
if (!status)
*data = le16_to_cpu(*(__force __le16 *)data);
return status;
/* Note that ice_read_flat_nvm takes into account the 4KB AdminQ and
* Shadow RAM sector restrictions necessary when reading from the NVM.
*/
status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
(u8 *)&data_local, true);
if (status)
return status;
}
/**
* ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @words: (in) number of words to read; (out) number of words actually read
* @data: words read from the Shadow RAM
*
* Reads 16 bit words (data buf) from the SR using the ice_read_sr_aq
* method. Ownership of the NVM is taken before reading the buffer and later
* released.
*/
static enum ice_status
ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
{
enum ice_status status;
bool last_cmd = false;
u16 words_read = 0;
u16 i = 0;
do {
u16 read_size, off_w;
/* Calculate number of bytes we should read in this step.
* It's not allowed to read more than one page at a time or
* to cross page boundaries.
*/
off_w = offset % ICE_SR_SECTOR_SIZE_IN_WORDS;
read_size = off_w ?
min_t(u16, *words,
(ICE_SR_SECTOR_SIZE_IN_WORDS - off_w)) :
min_t(u16, (*words - words_read),
ICE_SR_SECTOR_SIZE_IN_WORDS);
/* Check if this is last command, if so set proper flag */
if ((words_read + read_size) >= *words)
last_cmd = true;
status = ice_read_sr_aq(hw, offset, read_size,
data + words_read, last_cmd);
if (status)
goto read_nvm_buf_aq_exit;
/* Increment counter for words already read and move offset to
* new read location
*/
words_read += read_size;
offset += read_size;
} while (words_read < *words);
for (i = 0; i < *words; i++)
data[i] = le16_to_cpu(((__force __le16 *)data)[i]);
read_nvm_buf_aq_exit:
*words = words_read;
return status;
*data = le16_to_cpu(data_local);
return 0;
}
/**
@@ -188,7 +141,7 @@ read_nvm_buf_aq_exit:
*
* This function will request NVM ownership.
*/
static enum ice_status
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
if (hw->nvm.blank_nvm_mode)
@@ -203,7 +156,7 @@ ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
*
* This function will release NVM ownership.
*/
static void ice_release_nvm(struct ice_hw *hw)
void ice_release_nvm(struct ice_hw *hw)
{
if (hw->nvm.blank_nvm_mode)
return;
@@ -232,6 +185,239 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
return status;
}
/**
* ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
* @hw: pointer to hardware structure
* @module_tlv: pointer to module TLV to return
* @module_tlv_len: pointer to module TLV length to return
* @module_type: module type requested
*
* Finds the requested sub module TLV type from the Preserved Field
* Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value.
*/
enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
enum ice_status status;
u16 pfa_len, pfa_ptr;
u16 next_tlv;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n");
return status;
}
status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
return status;
}
/* Starting with first TLV after PFA length, iterate through the list
* of TLVs to find the requested one.
*/
next_tlv = pfa_ptr + 1;
while (next_tlv < pfa_ptr + pfa_len) {
u16 tlv_sub_module_type;
u16 tlv_len;
/* Read TLV type */
status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
break;
}
/* Read TLV length */
status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
break;
}
if (tlv_sub_module_type == module_type) {
if (tlv_len) {
*module_tlv = next_tlv;
*module_tlv_len = tlv_len;
return 0;
}
return ICE_ERR_INVAL_SIZE;
}
/* Check next TLV, i.e. current TLV pointer + length + 2 words
* (for current TLV's type and length)
*/
next_tlv = next_tlv + tlv_len + 2;
}
/* Module does not exist */
return ICE_ERR_DOES_NOT_EXIST;
}
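/* Editor's note: the PFA layout the loop above walks, for reference.
 * Each TLV occupies (length + 2) words: one type word, one length word,
 * then the value, hence next_tlv += tlv_len + 2:
 *
 *	pfa_ptr:     [ PFA length ]
 *	pfa_ptr + 1: [ type ][ len ][ value ... ]	TLV 0
 *	             [ type ][ len ][ value ... ]	TLV 1 ...
 */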
/**
* ice_read_pba_string - Reads part number string from NVM
* @hw: pointer to hardware structure
* @pba_num: stores the part number string from the NVM
* @pba_num_size: part number string buffer length
*
* Reads the part number string from the NVM.
*/
enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
{
u16 pba_tlv, pba_tlv_len;
enum ice_status status;
u16 pba_word, pba_size;
u16 i;
status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
ICE_SR_PBA_BLOCK_PTR);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
return status;
}
/* pba_size is the next word */
status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
return status;
}
if (pba_tlv_len < pba_size) {
ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
return ICE_ERR_INVAL_SIZE;
}
/* Subtract one to get PBA word count (PBA Size word is included in
* total size)
*/
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
return ICE_ERR_PARAM;
}
for (i = 0; i < pba_size; i++) {
status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
return status;
}
pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
pba_num[(i * 2) + 1] = pba_word & 0xFF;
}
pba_num[(pba_size * 2)] = '\0';
return status;
}
/**
* ice_get_orom_ver_info - Read Option ROM version information
* @hw: pointer to the HW struct
*
* Read the Combo Image version data from the Boot Configuration TLV and fill
* in the option ROM version data.
*/
static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
{
u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
struct ice_orom_info *orom = &hw->nvm.orom;
enum ice_status status;
u32 combo_ver;
status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
ICE_SR_BOOT_CFG_PTR);
if (status) {
ice_debug(hw, ICE_DBG_INIT,
"Failed to read Boot Configuration Block TLV.\n");
return status;
}
/* The Boot Configuration Block must be at least 2 words long
* (Combo Image Version High and Combo Image Version Low)
*/
if (boot_cfg_tlv_len < 2) {
ice_debug(hw, ICE_DBG_INIT,
"Invalid Boot Configuration Block TLV size.\n");
return ICE_ERR_INVAL_SIZE;
}
status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
&combo_hi);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
return status;
}
status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
&combo_lo);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
return status;
}
combo_ver = ((u32)combo_hi << 16) | combo_lo;
orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
ICE_OROM_VER_SHIFT);
orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
ICE_OROM_VER_BUILD_SHIFT);
return 0;
}
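
For reference, the combo_ver layout implied by the ICE_OROM_VER_* shifts and masks defined further down in ice_type.h (the sample value is invented):

/*
 *  31        24 23              8 7         0
 * +------------+-----------------+-----------+
 * |   major    |      build      |   patch   |
 * +------------+-----------------+-----------+
 *
 * e.g. combo_ver 0x01123405 -> major 1, build 0x1234, patch 5
 */
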
/**
* ice_discover_flash_size - Discover the available flash size.
* @hw: pointer to the HW struct
*
* The device flash could be up to 16MB in size. However, it is possible that
* the actual size is smaller. Use bisection to determine the accessible size
* of flash memory.
*/
static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
{
u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
enum ice_status status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
return status;
while ((max_size - min_size) > 1) {
u32 offset = (max_size + min_size) / 2;
u32 len = 1;
u8 data;
status = ice_read_flat_nvm(hw, offset, &len, &data, false);
if (status == ICE_ERR_AQ_ERROR &&
hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM,
"%s: New upper bound of %u bytes\n",
__func__, offset);
status = 0;
max_size = offset;
} else if (!status) {
ice_debug(hw, ICE_DBG_NVM,
"%s: New lower bound of %u bytes\n",
__func__, offset);
min_size = offset;
} else {
/* an unexpected error occurred */
goto err_read_flat_nvm;
}
}
ice_debug(hw, ICE_DBG_NVM,
"Predicted flash size is %u bytes\n", max_size);
hw->nvm.flash_size = max_size;
err_read_flat_nvm:
ice_release_nvm(hw);
return status;
}
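
A worked trace of the bisection, assuming a hypothetical 8 MB part in the 16 MB search window; the invariant is that min_size is always a readable offset and max_size an unreadable one:

/*
 * probe 8 MB -> EINVAL        -> max_size = 8 MB
 * probe 4 MB -> read succeeds -> min_size = 4 MB
 * probe 6 MB -> read succeeds -> min_size = 6 MB
 * ...the window halves each step until max_size - min_size == 1,
 * leaving max_size == 8 MB as the first inaccessible offset, i.e.
 * the usable flash size, after ~24 reads (log2 of 16 MB).
 */
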
/**
* ice_init_nvm - initializes NVM settings
* @hw: pointer to the HW struct
@@ -241,9 +427,8 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
*/
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
-u16 oem_hi, oem_lo, boot_cfg_tlv, boot_cfg_tlv_len;
struct ice_nvm_info *nvm = &hw->nvm;
-u16 eetrack_lo, eetrack_hi;
+u16 eetrack_lo, eetrack_hi, ver;
enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
@@ -269,12 +454,14 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return ICE_ERR_NVM_BLANK_MODE;
}
-status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &nvm->ver);
+status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) {
ice_debug(hw, ICE_DBG_INIT,
"Failed to read DEV starter version.\n");
return status;
}
+nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
+nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
if (status) {
@@ -289,79 +476,48 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+status = ice_discover_flash_size(hw);
+if (status) {
+ice_debug(hw, ICE_DBG_NVM,
+"NVM init error: failed to discover flash size.\n");
+return status;
+}
+switch (hw->device_id) {
/* the following devices do not have boot_cfg_tlv yet */
-if (hw->device_id == ICE_DEV_ID_E822C_BACKPLANE ||
-hw->device_id == ICE_DEV_ID_E822C_QSFP ||
-hw->device_id == ICE_DEV_ID_E822C_10G_BASE_T ||
-hw->device_id == ICE_DEV_ID_E822C_SGMII ||
-hw->device_id == ICE_DEV_ID_E822C_SFP ||
-hw->device_id == ICE_DEV_ID_E822X_BACKPLANE ||
-hw->device_id == ICE_DEV_ID_E822L_SFP ||
-hw->device_id == ICE_DEV_ID_E822L_10G_BASE_T ||
-hw->device_id == ICE_DEV_ID_E822L_SGMII)
+case ICE_DEV_ID_E823C_BACKPLANE:
+case ICE_DEV_ID_E823C_QSFP:
+case ICE_DEV_ID_E823C_SFP:
+case ICE_DEV_ID_E823C_10G_BASE_T:
+case ICE_DEV_ID_E823C_SGMII:
+case ICE_DEV_ID_E822C_BACKPLANE:
+case ICE_DEV_ID_E822C_QSFP:
+case ICE_DEV_ID_E822C_10G_BASE_T:
+case ICE_DEV_ID_E822C_SGMII:
+case ICE_DEV_ID_E822C_SFP:
+case ICE_DEV_ID_E822L_BACKPLANE:
+case ICE_DEV_ID_E822L_SFP:
+case ICE_DEV_ID_E822L_10G_BASE_T:
+case ICE_DEV_ID_E822L_SGMII:
+case ICE_DEV_ID_E823L_BACKPLANE:
+case ICE_DEV_ID_E823L_SFP:
+case ICE_DEV_ID_E823L_10G_BASE_T:
+case ICE_DEV_ID_E823L_1GBE:
+case ICE_DEV_ID_E823L_QSFP:
return status;
+default:
+break;
+}
-status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
-ICE_SR_BOOT_CFG_PTR);
+status = ice_get_orom_ver_info(hw);
if (status) {
-ice_debug(hw, ICE_DBG_INIT,
-"Failed to read Boot Configuration Block TLV.\n");
+ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
return status;
}
-/* Boot Configuration Block must have length at least 2 words
-* (Combo Image Version High and Combo Image Version Low)
-*/
-if (boot_cfg_tlv_len < 2) {
-ice_debug(hw, ICE_DBG_INIT,
-"Invalid Boot Configuration Block TLV size.\n");
-return ICE_ERR_INVAL_SIZE;
-}
-status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF),
-&oem_hi);
-if (status) {
-ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER hi.\n");
-return status;
-}
-status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF + 1),
-&oem_lo);
-if (status) {
-ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER lo.\n");
-return status;
-}
-nvm->oem_ver = ((u32)oem_hi << 16) | oem_lo;
return 0;
}
/**
* ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @words: (in) number of words to read; (out) number of words actually read
* @data: words read from the Shadow RAM
*
* Reads 16-bit words (data buffer) from the Shadow RAM using the
* ice_read_sr_buf_aq method. The buffer read is preceded by taking NVM
* ownership and followed by its release.
*/
enum ice_status
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
{
enum ice_status status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (!status) {
status = ice_read_sr_buf_aq(hw, offset, words, data);
ice_release_nvm(hw);
}
return status;
}
/**
* ice_nvm_validate_checksum
* @hw: pointer to the HW struct


@@ -4,5 +4,17 @@
#ifndef _ICE_NVM_H_
#define _ICE_NVM_H_
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type);
enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
#endif /* _ICE_NVM_H_ */


@@ -121,9 +121,7 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
break;
case ICE_AQ_LINK_SPEED_40GB:
-/* fall through */
case ICE_AQ_LINK_SPEED_50GB:
-/* fall through */
case ICE_AQ_LINK_SPEED_100GB:
speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
break;


@@ -578,7 +578,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
struct ice_aqc_get_sw_cfg_resp_elem *ele;
u16 pf_vf_num, swid, vsi_port_num;
bool is_vf = false;
-u8 type;
+u8 res_type;
ele = rbuf[i].elements;
vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
@@ -593,16 +593,16 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
ICE_AQC_GET_SW_CONF_RESP_IS_VF)
is_vf = true;
-type = le16_to_cpu(ele->vsi_port_num) >>
+res_type = le16_to_cpu(ele->vsi_port_num) >>
ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
-if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
+if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
/* FW VSI is not needed. Just continue. */
continue;
}
ice_init_port_info(hw->port_info, vsi_port_num,
-type, swid, pf_vf_num, is_vf);
+res_type, swid, pf_vf_num, is_vf);
}
} while (req_desc && !status);
@@ -760,7 +760,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_ETHERTYPE_MAC:
daddr = f_info->l_data.ethertype_mac.mac_addr;
-/* fall-through */
+fallthrough;
case ICE_SW_LKUP_ETHERTYPE:
off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
@@ -771,7 +771,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_PROMISC_VLAN:
vlan_id = f_info->l_data.mac_vlan.vlan_id;
-/* fall-through */
+fallthrough;
case ICE_SW_LKUP_PROMISC:
daddr = f_info->l_data.mac_vlan.mac_addr;
break;
@@ -958,7 +958,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
struct ice_aqc_sw_rules_elem *s_rule;
enum ice_status status;
u16 s_rule_size;
-u16 type;
+u16 rule_type;
int i;
if (!num_vsi)
@@ -970,11 +970,11 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
-type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
-ICE_AQC_SW_RULES_T_VSI_LIST_SET;
+rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
+ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
-type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
-ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
+rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
+ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
else
return ICE_ERR_PARAM;
@@ -992,7 +992,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
}
-s_rule->type = cpu_to_le16(type);
+s_rule->type = cpu_to_le16(rule_type);
s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);


@@ -453,10 +453,10 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
break;
default:
bpf_warn_invalid_xdp_action(act);
-/* fallthrough -- not supported action */
+fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-/* fallthrough -- handle aborts by dropping frame */
+fallthrough;
case XDP_DROP:
result = ICE_XDP_CONSUMED;
break;
@@ -1188,7 +1188,6 @@ ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
avg_pkt_size + 640);
break;
case ICE_AQ_LINK_SPEED_10GB:
-/* fall through */
default:
itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
avg_pkt_size + 640);


@@ -239,12 +239,21 @@ struct ice_fc_info {
enum ice_fc_mode req_mode; /* FC mode requested by caller */
};
+/* Option ROM version information */
+struct ice_orom_info {
+u8 major; /* Major version of OROM */
+u8 patch; /* Patch version of OROM */
+u16 build; /* Build version of OROM */
+};
/* NVM Information */
struct ice_nvm_info {
-u32 eetrack; /* NVM data version */
-u32 oem_ver; /* OEM version info */
-u16 sr_words; /* Shadow RAM size in words */
-u16 ver; /* NVM package version */
+struct ice_orom_info orom; /* Option ROM version info */
+u32 eetrack; /* NVM data version */
+u16 sr_words; /* Shadow RAM size in words */
+u32 flash_size; /* Size of available flash in bytes */
+u8 major_ver; /* major version of NVM package */
+u8 minor_ver; /* minor version of dev starter */
u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
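
Illustrative only (the format string is an assumption, not the driver's ethtool output): the reworked fields compose into a human-readable version like so.

static void show_nvm_versions(const struct ice_nvm_info *nvm)
{
	pr_info("NVM %x.%02x, eetrack 0x%08x, OROM %u.%u.%u\n",
		nvm->major_ver, nvm->minor_ver, nvm->eetrack,
		nvm->orom.major, nvm->orom.build, nvm->orom.patch);
}
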
@@ -626,7 +635,8 @@ struct ice_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define ICE_SR_BOOT_CFG_PTR 0x132
-#define ICE_NVM_OEM_VER_OFF 0x02
+#define ICE_NVM_OROM_VER_OFF 0x02
#define ICE_SR_PBA_BLOCK_PTR 0x16
#define ICE_SR_NVM_DEV_STARTER_VER 0x18
#define ICE_SR_NVM_EETRACK_LO 0x2D
#define ICE_SR_NVM_EETRACK_HI 0x2E
@@ -634,12 +644,12 @@ struct ice_hw_port_stats {
#define ICE_NVM_VER_LO_MASK (0xff << ICE_NVM_VER_LO_SHIFT)
#define ICE_NVM_VER_HI_SHIFT 12
#define ICE_NVM_VER_HI_MASK (0xf << ICE_NVM_VER_HI_SHIFT)
-#define ICE_OEM_VER_PATCH_SHIFT 0
-#define ICE_OEM_VER_PATCH_MASK (0xff << ICE_OEM_VER_PATCH_SHIFT)
-#define ICE_OEM_VER_BUILD_SHIFT 8
-#define ICE_OEM_VER_BUILD_MASK (0xffff << ICE_OEM_VER_BUILD_SHIFT)
-#define ICE_OEM_VER_SHIFT 24
-#define ICE_OEM_VER_MASK (0xff << ICE_OEM_VER_SHIFT)
+#define ICE_OROM_VER_PATCH_SHIFT 0
+#define ICE_OROM_VER_PATCH_MASK (0xff << ICE_OROM_VER_PATCH_SHIFT)
+#define ICE_OROM_VER_BUILD_SHIFT 8
+#define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT)
+#define ICE_OROM_VER_SHIFT 24
+#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
#define ICE_SR_WORDS_IN_1KB 512


@@ -5,11 +5,6 @@
#define _ICE_VIRTCHNL_PF_H_
#include "ice.h"
-#define ICE_MAX_VLANID 4095
-#define ICE_VLAN_PRIORITY_S 12
-#define ICE_VLAN_M 0xFFF
-#define ICE_PRIORITY_M 0x7000
/* Restrict number of MAC Addr and VLAN that non-trusted VF can program */
#define ICE_MAX_VLAN_PER_VF 8
#define ICE_MAX_MACADDR_PER_VF 12
@@ -26,18 +21,15 @@
#define ICE_PCI_CIAD_WAIT_COUNT 100
#define ICE_PCI_CIAD_WAIT_DELAY_US 1
-/* VF resources default values and limitation */
+/* VF resource constraints */
#define ICE_MAX_VF_COUNT 256
#define ICE_MAX_QS_PER_VF 256
#define ICE_MIN_QS_PER_VF 1
#define ICE_DFLT_QS_PER_VF 4
#define ICE_NONQ_VECS_VF 1
#define ICE_MAX_SCATTER_QS_PER_VF 16
-#define ICE_MAX_BASE_QS_PER_VF 16
-#define ICE_MAX_INTR_PER_VF 65
-#define ICE_MAX_POLICY_INTR_PER_VF 33
#define ICE_MAX_RSS_QS_PER_VF 16
+#define ICE_NUM_VF_MSIX_MED 17
+#define ICE_NUM_VF_MSIX_SMALL 5
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_TRIES 40
#define ICE_MAX_VF_RESET_SLEEP_MS 20
@@ -61,6 +53,13 @@ enum ice_virtchnl_cap {
ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
};
/* VF MDD events print structure */
struct ice_mdd_vf_events {
u16 count; /* total count of Rx|Tx events */
/* count number of the last printed event */
u16 last_printed;
};
/* VF information structure */
struct ice_vf {
struct ice_pf *pf;
@@ -73,9 +72,9 @@ struct ice_vf {
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
struct virtchnl_ether_addr dflt_lan_addr;
-DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF);
-DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF);
-u16 port_vlan_id;
+DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
+DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+u16 port_vlan_info; /* Port VLAN ID and QoS */
u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
u8 trusted:1;
u8 spoofchk:1;
@@ -89,14 +88,14 @@ struct ice_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
-u64 num_mdd_events; /* number of MDD events detected */
u64 num_inval_msgs; /* number of continuous invalid msgs */
u64 num_valid_msgs; /* number of valid msgs detected */
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
u16 num_vf_qs; /* num of queue configured per VF */
u16 num_qs_ena; /* total num of Tx/Rx queue enabled */
+struct ice_mdd_vf_events mdd_rx_events;
+struct ice_mdd_vf_events mdd_tx_events;
};
#ifdef CONFIG_PCI_IOV
@@ -111,6 +110,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -128,6 +128,9 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf);
int
ice_get_vf_stats(struct net_device *netdev, int vf_id,
struct ifla_vf_stats *vf_stats);
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
@@ -135,6 +138,8 @@ ice_get_vf_stats(struct net_device *netdev, int vf_id,
#define ice_vc_notify_link_state(pf) do {} while (0)
#define ice_vc_notify_reset(pf) do {} while (0)
#define ice_set_vf_state_qs_dis(vf) do {} while (0)
#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
#define ice_print_vfs_mdd_events(pf) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
@@ -143,6 +148,12 @@ ice_reset_all_vfs(struct ice_pf __always_unused *pf,
return true;
}
static inline bool
ice_reset_vf(struct ice_vf __always_unused *vf, bool __always_unused is_vflr)
{
return true;
}
static inline int
ice_sriov_configure(struct pci_dev __always_unused *pdev,
int __always_unused num_vfs)


@@ -183,7 +183,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
}
-err = ice_vsi_ctrl_rx_ring(vsi, false, q_idx);
+err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
@@ -243,7 +243,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
ice_qvec_cfg_msix(vsi, q_vector);
-err = ice_vsi_ctrl_rx_ring(vsi, true, q_idx);
+err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
if (err)
goto free_buf;
@@ -457,7 +457,7 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
if (if_running) {
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d", ret);
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
goto xsk_umem_if_up;
}
}
@@ -471,11 +471,11 @@ xsk_umem_if_up:
if (!ret && umem_present)
napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d", ret);
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
if (umem_failure) {
netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d",
netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d\n",
umem_present ? "en" : "dis", umem_failure);
return umem_failure;
}
@@ -609,7 +609,7 @@ ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
*/
static bool
ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
-bool alloc(struct ice_ring *, struct ice_rx_buf *))
+bool (*alloc)(struct ice_ring *, struct ice_rx_buf *))
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
@@ -816,10 +816,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
break;
default:
bpf_warn_invalid_xdp_action(act);
-/* fallthrough -- not supported action */
+fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-/* fallthrough -- handle aborts by dropping frame */
+fallthrough;
case XDP_DROP:
result = ICE_XDP_CONSUMED;
break;
@@ -841,8 +841,8 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int xdp_xmit = 0;
+bool failure = false;
struct xdp_buff xdp;
-bool failure = 0;
xdp.rxq = &rx_ring->xdp_rxq;
@@ -937,6 +937,15 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
ice_finalize_xdp_rx(rx_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
else
xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
return (int)total_rx_packets;
}
return failure ? budget : (int)total_rx_packets;
}
@@ -988,6 +997,8 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return budget > 0 && work_done;
@@ -1063,6 +1074,13 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
if (xsk_frames)
xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) {
if (xdp_ring->next_to_clean == xdp_ring->next_to_use)
xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
else
xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);


@@ -24,7 +24,7 @@ ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
struct xdp_umem __always_unused *umem,
u16 __always_unused qid)
{
-return -ENOTSUPP;
+return -EOPNOTSUPP;
}
static inline void
@@ -63,7 +63,7 @@ static inline int
ice_xsk_wakeup(struct net_device __always_unused *netdev,
u32 __always_unused queue_id, u32 __always_unused flags)
{
-return -ENOTSUPP;
+return -EOPNOTSUPP;
}
#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)


@@ -306,7 +306,7 @@ struct igb_q_vector {
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
-struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
+struct igb_ring ring[] ____cacheline_internodealigned_in_smp;
};
enum e1000_ring_flags_t {


@@ -2183,27 +2183,6 @@ static int igb_set_coalesce(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
-if (ec->rx_max_coalesced_frames ||
-ec->rx_coalesce_usecs_irq ||
-ec->rx_max_coalesced_frames_irq ||
-ec->tx_max_coalesced_frames ||
-ec->tx_coalesce_usecs_irq ||
-ec->stats_block_coalesce_usecs ||
-ec->use_adaptive_rx_coalesce ||
-ec->use_adaptive_tx_coalesce ||
-ec->pkt_rate_low ||
-ec->rx_coalesce_usecs_low ||
-ec->rx_max_coalesced_frames_low ||
-ec->tx_coalesce_usecs_low ||
-ec->tx_max_coalesced_frames_low ||
-ec->pkt_rate_high ||
-ec->rx_coalesce_usecs_high ||
-ec->rx_max_coalesced_frames_high ||
-ec->tx_coalesce_usecs_high ||
-ec->tx_max_coalesced_frames_high ||
-ec->rate_sample_interval)
-return -ENOTSUPP;
if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 3) &&
(ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
@@ -3477,6 +3456,7 @@ static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops igb_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = igb_get_drvinfo,
.get_regs_len = igb_get_regs_len,
.get_regs = igb_get_regs,


@@ -198,11 +198,11 @@ int igb_sysfs_init(struct igb_adapter *adapter)
}
/* init i2c_client */
-client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
-if (client == NULL) {
+client = i2c_new_client_device(&adapter->i2c_adap, &i350_sensor_info);
+if (IS_ERR(client)) {
dev_info(&adapter->pdev->dev,
"Failed to create new i2c device.\n");
-rc = -ENODEV;
+rc = PTR_ERR(client);
goto exit;
}
adapter->i2c_client = client;


@@ -424,6 +424,7 @@ static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops igbvf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = igbvf_get_drvinfo,
.get_regs_len = igbvf_get_regs_len,
.get_regs = igbvf_get_regs,


@@ -8,4 +8,4 @@
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_ethtool.o igc_ptp.o
+igc_ethtool.o igc_ptp.o igc_dump.o


@@ -42,6 +42,10 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
const u8 *addr, u8 queue, u8 flags);
void igc_update_stats(struct igc_adapter *adapter);
/* igc_dump declarations */
void igc_rings_dump(struct igc_adapter *adapter);
void igc_regs_dump(struct igc_adapter *adapter);
extern char igc_driver_name[];
extern char igc_driver_version[];
@@ -53,10 +57,13 @@ extern char igc_driver_version[];
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
/* Flags definitions */
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(3)
#define IGC_FLAG_DMAC BIT(4)
#define IGC_FLAG_PTP BIT(8)
#define IGC_FLAG_WOL_SUPPORTED BIT(8)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
@@ -108,7 +115,7 @@ extern char igc_driver_version[];
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
/* Transmit and receive latency (for PTP timestamps) */
-/* FIXME: These values were estimated using the ones that i210 has as
+/* FIXME: These values were estimated using the ones that i225 has as
* basis, they seem to provide good numbers with ptp4l/phc2sys, but we
* need to confirm them.
*/
@@ -319,7 +326,7 @@ struct igc_q_vector {
struct net_device poll_dev;
/* for dynamic allocation of rings associated with this q_vector */
-struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
+struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
};
#define MAX_ETYPE_FILTER (4 - 1)
@@ -552,6 +559,7 @@ int igc_erase_filter(struct igc_adapter *adapter,
void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector, struct sk_buff *skb);
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,


@@ -16,7 +16,10 @@
/* Wake Up Filter Control */
#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
@@ -259,6 +262,9 @@
#define IGC_GPIE_EIAME 0x40000000
#define IGC_GPIE_PBA 0x80000000
/* Receive Descriptor bit definitions */
#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */
/* Transmit Descriptor bit definitions */
#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */
#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */


@@ -0,0 +1,323 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include "igc.h"
struct igc_reg_info {
u32 ofs;
char *name;
};
static const struct igc_reg_info igc_reg_info_tbl[] = {
/* General Registers */
{IGC_CTRL, "CTRL"},
{IGC_STATUS, "STATUS"},
{IGC_CTRL_EXT, "CTRL_EXT"},
{IGC_MDIC, "MDIC"},
/* Interrupt Registers */
{IGC_ICR, "ICR"},
/* RX Registers */
{IGC_RCTL, "RCTL"},
{IGC_RDLEN(0), "RDLEN"},
{IGC_RDH(0), "RDH"},
{IGC_RDT(0), "RDT"},
{IGC_RXDCTL(0), "RXDCTL"},
{IGC_RDBAL(0), "RDBAL"},
{IGC_RDBAH(0), "RDBAH"},
/* TX Registers */
{IGC_TCTL, "TCTL"},
{IGC_TDBAL(0), "TDBAL"},
{IGC_TDBAH(0), "TDBAH"},
{IGC_TDLEN(0), "TDLEN"},
{IGC_TDH(0), "TDH"},
{IGC_TDT(0), "TDT"},
{IGC_TXDCTL(0), "TXDCTL"},
{IGC_TDFH, "TDFH"},
{IGC_TDFT, "TDFT"},
{IGC_TDFHS, "TDFHS"},
{IGC_TDFPC, "TDFPC"},
/* List Terminator */
{}
};
/* igc_regdump - register printout routine */
static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
{
int n = 0;
char rname[16];
u32 regs[8];
switch (reginfo->ofs) {
case IGC_RDLEN(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDLEN(n));
break;
case IGC_RDH(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDH(n));
break;
case IGC_RDT(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDT(n));
break;
case IGC_RXDCTL(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RXDCTL(n));
break;
case IGC_RDBAL(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDBAL(n));
break;
case IGC_RDBAH(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_RDBAH(n));
break;
case IGC_TDBAL(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDBAL(n));
break;
case IGC_TDBAH(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDBAH(n));
break;
case IGC_TDLEN(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDLEN(n));
break;
case IGC_TDH(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDH(n));
break;
case IGC_TDT(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TDT(n));
break;
case IGC_TXDCTL(0):
for (n = 0; n < 4; n++)
regs[n] = rd32(IGC_TXDCTL(n));
break;
default:
pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
regs[2], regs[3]);
}
/* igc_rings_dump - Tx-rings and Rx-rings */
void igc_rings_dump(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct my_u0 { u64 a; u64 b; } *u0;
union igc_adv_tx_desc *tx_desc;
union igc_adv_rx_desc *rx_desc;
struct igc_ring *tx_ring;
struct igc_ring *rx_ring;
u32 staterr;
u16 i, n;
if (!netif_msg_hw(adapter))
return;
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start\n");
pr_info("%-15s %016lX %016lX\n", netdev->name,
netdev->state, dev_trans_start(netdev));
}
/* Print TX Ring Summary */
if (!netdev || !netif_running(netdev))
goto exit;
dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
struct igc_tx_buffer *buffer_info;
tx_ring = adapter->tx_ring[n];
buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
n, tx_ring->next_to_use, tx_ring->next_to_clean,
(u64)dma_unmap_addr(buffer_info, dma),
dma_unmap_len(buffer_info, len),
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp);
}
/* Print TX Rings */
if (!netif_msg_tx_done(adapter))
goto rx_ring_summary;
dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
/* Transmit Descriptor Formats
*
* Advanced Transmit Descriptor
* +--------------------------------------------------------------+
* 0 | Buffer Address [63:0] |
* +--------------------------------------------------------------+
* 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
* +--------------------------------------------------------------+
* 63 46 45 40 39 38 36 35 32 31 24 15 0
*/
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
pr_info("------------------------------------\n");
pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
pr_info("------------------------------------\n");
pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
const char *next_desc;
struct igc_tx_buffer *buffer_info;
tx_desc = IGC_TX_DESC(tx_ring, i);
buffer_info = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
if (i == tx_ring->next_to_use &&
i == tx_ring->next_to_clean)
next_desc = " NTC/U";
else if (i == tx_ring->next_to_use)
next_desc = " NTU";
else if (i == tx_ring->next_to_clean)
next_desc = " NTC";
else
next_desc = "";
pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
i, le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)dma_unmap_addr(buffer_info, dma),
dma_unmap_len(buffer_info, len),
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp,
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1, buffer_info->skb->data,
dma_unmap_len(buffer_info, len),
true);
}
}
/* Print RX Rings Summary */
rx_ring_summary:
dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
pr_info("Queue [NTU] [NTC]\n");
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
pr_info(" %5d %5X %5X\n",
n, rx_ring->next_to_use, rx_ring->next_to_clean);
}
/* Print RX Rings */
if (!netif_msg_rx_status(adapter))
goto exit;
dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
/* Advanced Receive Descriptor (Read) Format
* 63 1 0
* +-----------------------------------------------------+
* 0 | Packet Buffer Address [63:1] |A0/NSE|
* +----------------------------------------------+------+
* 8 | Header Buffer Address [63:1] | DD |
* +-----------------------------------------------------+
*
*
* Advanced Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 30 21 20 17 16 4 3 0
* +------------------------------------------------------+
* 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
* | Checksum Ident | | | | Type | Type |
* +------------------------------------------------------+
* 8 | VLAN Tag | Length | Extended Error | Extended Status |
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
*/
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
struct igc_rx_buffer *buffer_info;
buffer_info = &rx_ring->rx_buffer_info[i];
rx_desc = IGC_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
if (i == rx_ring->next_to_use)
next_desc = " NTU";
else if (i == rx_ring->next_to_clean)
next_desc = " NTC";
else
next_desc = "";
if (staterr & IGC_RXD_STAT_DD) {
/* Descriptor Done */
pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
"RWB", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
next_desc);
} else {
pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
"R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
next_desc);
if (netif_msg_pktdata(adapter) &&
buffer_info->dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
page_address
(buffer_info->page) +
buffer_info->page_offset,
igc_rx_bufsz(rx_ring),
true);
}
}
}
}
exit:
return;
}
/* igc_regs_dump - registers dump */
void igc_regs_dump(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
struct igc_reg_info *reginfo;
/* Print Registers */
dev_info(&adapter->pdev->dev, "Register Dump\n");
pr_info(" Register Name Value\n");
for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl;
reginfo->name; reginfo++) {
igc_regdump(hw, reginfo);
}
}


@@ -308,6 +308,65 @@ static void igc_get_regs(struct net_device *netdev,
regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
}
static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igc_adapter *adapter = netdev_priv(netdev);
wol->wolopts = 0;
if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED))
return;
wol->supported = WAKE_UCAST | WAKE_MCAST |
WAKE_BCAST | WAKE_MAGIC |
WAKE_PHY;
/* apply any specific unsupported masks here */
switch (adapter->hw.device_id) {
default:
break;
}
if (adapter->wol & IGC_WUFC_EX)
wol->wolopts |= WAKE_UCAST;
if (adapter->wol & IGC_WUFC_MC)
wol->wolopts |= WAKE_MCAST;
if (adapter->wol & IGC_WUFC_BC)
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & IGC_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
if (adapter->wol & IGC_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
}
static int igc_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igc_adapter *adapter = netdev_priv(netdev);
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER))
return -EOPNOTSUPP;
if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED))
return wol->wolopts ? -EOPNOTSUPP : 0;
/* these settings will always override what we currently have */
adapter->wol = 0;
if (wol->wolopts & WAKE_UCAST)
adapter->wol |= IGC_WUFC_EX;
if (wol->wolopts & WAKE_MCAST)
adapter->wol |= IGC_WUFC_MC;
if (wol->wolopts & WAKE_BCAST)
adapter->wol |= IGC_WUFC_BC;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= IGC_WUFC_MAG;
if (wol->wolopts & WAKE_PHY)
adapter->wol |= IGC_WUFC_LNKC;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
return 0;
}
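
From userspace these hooks are exercised through the standard SIOCETHTOOL ioctl, the same path `ethtool -s <iface> wol g` takes. A minimal sketch (hypothetical helper; error handling elided) requesting magic-packet wake, which igc_set_wol() above maps to IGC_WUFC_MAG:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_wol_magic(int sock, const char *ifname)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_SWOL, .wolopts = WAKE_MAGIC };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&wol;
	return ioctl(sock, SIOCETHTOOL, &ifr);	/* sock: any AF_INET socket */
}
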
static u32 igc_get_msglevel(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -802,27 +861,6 @@ static int igc_set_coalesce(struct net_device *netdev,
struct igc_adapter *adapter = netdev_priv(netdev);
int i;
-if (ec->rx_max_coalesced_frames ||
-ec->rx_coalesce_usecs_irq ||
-ec->rx_max_coalesced_frames_irq ||
-ec->tx_max_coalesced_frames ||
-ec->tx_coalesce_usecs_irq ||
-ec->stats_block_coalesce_usecs ||
-ec->use_adaptive_rx_coalesce ||
-ec->use_adaptive_tx_coalesce ||
-ec->pkt_rate_low ||
-ec->rx_coalesce_usecs_low ||
-ec->rx_max_coalesced_frames_low ||
-ec->tx_coalesce_usecs_low ||
-ec->tx_max_coalesced_frames_low ||
-ec->pkt_rate_high ||
-ec->rx_coalesce_usecs_high ||
-ec->rx_max_coalesced_frames_high ||
-ec->tx_coalesce_usecs_high ||
-ec->tx_max_coalesced_frames_high ||
-ec->rate_sample_interval)
-return -ENOTSUPP;
if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS ||
(ec->rx_coalesce_usecs > 3 &&
ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) ||
@@ -1856,9 +1894,12 @@ static int igc_set_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops igc_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = igc_get_drvinfo,
.get_regs_len = igc_get_regs_len,
.get_regs = igc_get_regs,
.get_wol = igc_get_wol,
.set_wol = igc_set_wol,
.get_msglevel = igc_get_msglevel,
.set_msglevel = igc_set_msglevel,
.nway_reset = igc_nway_reset,


@@ -3546,6 +3546,8 @@ static void igc_reset_task(struct work_struct *work)
adapter = container_of(work, struct igc_adapter, reset_task);
igc_rings_dump(adapter);
igc_regs_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igc_reinit_locked(adapter);
}
@@ -4029,6 +4031,9 @@ static void igc_watchdog_task(struct work_struct *work)
}
}
if (link) {
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
@@ -4114,6 +4119,8 @@ no_wait:
return;
}
}
pm_schedule_suspend(netdev->dev.parent,
MSEC_PER_SEC * 5);
/* also check for alternate media here */
} else if (!netif_carrier_ok(netdev) &&
@@ -4337,6 +4344,7 @@ request_done:
static int __igc_open(struct net_device *netdev, bool resuming)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
int err = 0;
int i = 0;
@@ -4348,6 +4356,9 @@ static int __igc_open(struct net_device *netdev, bool resuming)
return -EBUSY;
}
if (!resuming)
pm_runtime_get_sync(&pdev->dev);
netif_carrier_off(netdev);
/* allocate transmit descriptors */
@@ -4386,6 +4397,9 @@ static int __igc_open(struct net_device *netdev, bool resuming)
rd32(IGC_ICR);
igc_irq_enable(adapter);
if (!resuming)
pm_runtime_put(&pdev->dev);
netif_tx_start_all_queues(netdev);
/* start the watchdog. */
@@ -4404,6 +4418,8 @@ err_setup_rx:
igc_free_all_tx_resources(adapter);
err_setup_tx:
igc_reset(adapter);
if (!resuming)
pm_runtime_put(&pdev->dev);
return err;
}
@@ -4428,9 +4444,13 @@ static int igc_open(struct net_device *netdev)
static int __igc_close(struct net_device *netdev, bool suspending)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
if (!suspending)
pm_runtime_get_sync(&pdev->dev);
igc_down(adapter);
igc_release_hw_control(adapter);
@@ -4440,6 +4460,9 @@ static int __igc_close(struct net_device *netdev, bool suspending)
igc_free_all_tx_resources(adapter);
igc_free_all_rx_resources(adapter);
if (!suspending)
pm_runtime_put_sync(&pdev->dev);
return 0;
}
@@ -4766,6 +4789,16 @@ static int igc_probe(struct pci_dev *pdev,
hw->fc.requested_mode = igc_fc_default;
hw->fc.current_mode = igc_fc_default;
/* By default, support wake on port A */
adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
/* initialize the wol settings based on the eeprom settings */
if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
adapter->wol |= IGC_WUFC_MAG;
device_set_wakeup_enable(&adapter->pdev->dev,
adapter->flags & IGC_FLAG_WOL_SUPPORTED);
/* reset the hardware with the new settings */
igc_reset(adapter);
@@ -4792,6 +4825,10 @@ static int igc_probe(struct pci_dev *pdev,
pcie_print_link_status(pdev);
netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_put_noidle(&pdev->dev);
return 0;
err_register:
@@ -4826,6 +4863,8 @@ static void igc_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
pm_runtime_get_noresume(&pdev->dev);
igc_ptp_stop(adapter);
set_bit(__IGC_DOWN, &adapter->state);
@@ -4870,6 +4909,8 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (netif_running(netdev))
__igc_close(netdev, true);
igc_ptp_suspend(adapter);
igc_clear_interrupt_scheme(adapter);
rtnl_unlock();
@@ -5045,6 +5086,108 @@ static void igc_shutdown(struct pci_dev *pdev)
}
}
/**
* igc_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current PCI connection state
*
* This function is called after a PCI bus error affecting
* this device has been detected.
**/
static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
if (netif_running(netdev))
igc_down(adapter);
pci_disable_device(pdev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
/**
* igc_io_slot_reset - called after the PCI bus has been reset.
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
* resembles the first-half of the igc_resume routine.
**/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
pci_ers_result_t result;
if (pci_enable_device_mem(pdev)) {
dev_err(&pdev->dev,
"Could not re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
/* In case of PCI error, adapter loses its HW address
* so we should re-assign it here.
*/
hw->hw_addr = adapter->io_addr;
igc_reset(adapter);
wr32(IGC_WUS, ~0);
result = PCI_ERS_RESULT_RECOVERED;
}
return result;
}
/**
* igc_io_resume - called when traffic can start to flow again.
* @pdev: Pointer to PCI device
*
* This callback is called when the error recovery driver tells us that
it's OK to resume normal operation. Implementation resembles the
* second-half of the igc_resume routine.
*/
static void igc_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
rtnl_lock();
if (netif_running(netdev)) {
if (igc_open(netdev)) {
dev_err(&pdev->dev, "igc_open failed after reset\n");
rtnl_unlock();
return;
}
}
netif_device_attach(netdev);
/* let the f/w know that the h/w is now under the control of the
* driver.
*/
igc_get_hw_control(adapter);
rtnl_unlock();
}
static const struct pci_error_handlers igc_err_handler = {
.error_detected = igc_io_error_detected,
.slot_reset = igc_io_slot_reset,
.resume = igc_io_resume,
};
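
Taken together, the three callbacks implement the standard AER recovery sequence (simplified; see Documentation/PCI/pci-error-recovery.rst):

/*
 * bus error --> igc_io_error_detected()  detach netdev, request reset
 *           --> igc_io_slot_reset()      re-enable device, igc_reset()
 *           --> igc_io_resume()          reopen netdev, reattach
 */
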
#ifdef CONFIG_PM
static const struct dev_pm_ops igc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
@@ -5062,6 +5205,7 @@ static struct pci_driver igc_driver = {
.driver.pm = &igc_pm_ops,
#endif
.shutdown = igc_shutdown,
.err_handler = &igc_err_handler,
};
/**


@@ -509,7 +509,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
* This work function polls the TSYNCTXCTL valid bit to determine when a
* timestamp has been taken for the current stored skb.
*/
-void igc_ptp_tx_work(struct work_struct *work)
+static void igc_ptp_tx_work(struct work_struct *work)
{
struct igc_adapter *adapter = container_of(work, struct igc_adapter,
ptp_tx_work);


@@ -17,6 +17,11 @@
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
#define IGC_TDFH 0x03410 /* Tx Data FIFO Head - RW */
#define IGC_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
#define IGC_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
#define IGC_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
#define IGC_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
/* NVM Register Descriptions */
#define IGC_EERD 0x12014 /* EEprom mode read - RW */


@@ -462,7 +462,7 @@ struct ixgbe_q_vector {
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
-struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
+struct ixgbe_ring ring[] ____cacheline_internodealigned_in_smp;
};
#ifdef CONFIG_IXGBE_HWMON


@@ -3444,6 +3444,7 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = ixgbe_get_drvinfo,
.get_regs_len = ixgbe_get_regs_len,
.get_regs = ixgbe_get_regs,


@@ -968,8 +968,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
-int i, pos;
-u8 buf[8];
+u64 dsn;
if (!info)
return -EINVAL;
@@ -985,17 +984,11 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
/* Serial Number */
/* Get the PCI-e Device Serial Number Capability */
-pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
-if (pos) {
-pos += 4;
-for (i = 0; i < 8; i++)
-pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);
+dsn = pci_get_dsn(adapter->pdev);
+if (dsn)
snprintf(info->serial_number, sizeof(info->serial_number),
-"%02X%02X%02X%02X%02X%02X%02X%02X",
-buf[7], buf[6], buf[5], buf[4],
-buf[3], buf[2], buf[1], buf[0]);
-} else
+"%016llX", dsn);
+else
snprintf(info->serial_number, sizeof(info->serial_number),
"Unknown");


@@ -968,6 +968,7 @@ static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops ixgbevf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,
.get_regs = ixgbevf_get_regs,