Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

   1) Support AES128-CCM ciphers in kTLS, from Vakul Garg.
   2) Add fib_sync_mem to control the amount of dirty memory we allow to queue up between synchronize RCU calls, from David Ahern.
   3) Make flow classifier more lockless, from Vlad Buslov.
   4) Add PHY downshift support to aquantia driver, from Heiner Kallweit.
   5) Add SKB cache for TCP rx and tx, from Eric Dumazet. This reduces contention on SLAB spinlocks in heavy RPC workloads.
   6) Partial GSO offload support in XFRM, from Boris Pismenny.
   7) Add fast link down support to ethtool, from Heiner Kallweit.
   8) Use siphash for IP ID generator, from Eric Dumazet.
   9) Pull nexthops even further out from ipv4/ipv6 routes and FIB entries, from David Ahern.
  10) Move skb->xmit_more into a per-cpu variable, from Florian Westphal.
  11) Improve eBPF verifier speed and increase maximum program size, from Alexei Starovoitov.
  12) Eliminate per-bucket spinlocks in rhashtable, and instead use bit spinlocks. From Neil Brown.
  13) Allow tunneling with GUE encap in ipvs, from Jacky Hu.
  14) Improve link partner cap detection in generic PHY code, from Heiner Kallweit.
  15) Add layer 2 encap support to bpf_skb_adjust_room(), from Alan Maguire.
  16) Remove SKB list implementation assumptions in SCTP, yours truly.
  17) Various cleanups, optimizations, and simplifications in r8169 driver. From Heiner Kallweit.
  18) Add memory accounting on TX and RX path of SCTP, from Xin Long.
  19) Switch PHY drivers over to use dynamic feature detection, from Heiner Kallweit.
  20) Support flow steering without masking in dpaa2-eth, from Ioana Ciocoi.
  21) Implement ndo_get_devlink_port in netdevsim driver, from Jiri Pirko.
  22) Increase the strictness of parsing for current and future netlink attributes, and export such policies to userspace. From Johannes Berg.
  23) Allow DSA tag drivers to be modular, from Andrew Lunn.
  24) Remove legacy DSA probing support, also from Andrew Lunn.
  25) Allow ll_temac driver to be used on non-x86 platforms, from Esben Haabendal.
  26) Add a generic tracepoint for TX queue timeouts to ease debugging, from Cong Wang.
  27) More indirect call optimizations, from Paolo Abeni"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1763 commits)
  cxgb4: Fix error path in cxgb4_init_module
  net: phy: improve pause mode reporting in phy_print_status
  dt-bindings: net: Fix a typo in the phy-mode list for ethernet bindings
  net: macb: Change interrupt and napi enable order in open
  net: ll_temac: Improve error message on error IRQ
  net/sched: remove block pointer from common offload structure
  net: ethernet: support of_get_mac_address new ERR_PTR error
  net: usb: smsc: fix warning reported by kbuild test robot
  staging: octeon-ethernet: Fix of_get_mac_address ERR_PTR check
  net: dsa: support of_get_mac_address new ERR_PTR error
  net: dsa: sja1105: Fix status initialization in sja1105_get_ethtool_stats
  vrf: sit mtu should not be updated when vrf netdev is the link
  net: dsa: Fix error cleanup path in dsa_init_module
  l2tp: Fix possible NULL pointer dereference
  taprio: add null check on sched_nest to avoid potential null pointer dereference
  net: mvpp2: cls: fix less than zero check on a u32 variable
  net_sched: sch_fq: handle non connected flows
  net_sched: sch_fq: do not assume EDT packets are ordered
  net: hns3: use devm_kcalloc when allocating desc_cb
  net: hns3: some cleanup for struct hns3_enet_ring
  ...
@@ -17,3 +17,4 @@ ice-y := ice_main.o \
        ice_txrx.o \
        ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o
@@ -34,6 +34,7 @@
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"

@@ -42,10 +43,21 @@

extern const char ice_drv_ver[];
#define ICE_BAR0 0
#define ICE_DFLT_NUM_DESC 128
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE
#define ICE_MAX_NUM_DESC 8160
/* set default number of Rx/Tx descriptors to the minimum between
 * ICE_MAX_NUM_DESC and the number of descriptors to fill up an entire page
 */
#define ICE_DFLT_NUM_RX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
                                   ALIGN(PAGE_SIZE / \
                                         sizeof(union ice_32byte_rx_desc), \
                                         ICE_REQ_DESC_MULTIPLE))
#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
                                   ALIGN(PAGE_SIZE / \
                                         sizeof(struct ice_tx_desc), \
                                         ICE_REQ_DESC_MULTIPLE))
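/* Editor's note, a worked example assuming 4 KiB pages, a 32-byte
 * union ice_32byte_rx_desc and a 16-byte struct ice_tx_desc:
 *      ICE_DFLT_NUM_RX_DESC = min(8160, ALIGN(4096 / 32, 32)) = 128
 *      ICE_DFLT_NUM_TX_DESC = min(8160, ALIGN(4096 / 16, 32)) = 256
 * so the defaults scale with PAGE_SIZE but stay capped at ICE_MAX_NUM_DESC
 * and remain multiples of ICE_REQ_DESC_MULTIPLE.
 */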
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32

@@ -71,6 +83,8 @@ extern const char ice_drv_ver[];
#define ICE_MAX_QS_PER_VF 256
#define ICE_MIN_QS_PER_VF 1
#define ICE_DFLT_QS_PER_VF 4
#define ICE_NONQ_VECS_VF 1
#define ICE_MAX_SCATTER_QS_PER_VF 16
#define ICE_MAX_BASE_QS_PER_VF 16
#define ICE_MAX_INTR_PER_VF 65
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)

@@ -114,6 +128,23 @@ extern const char ice_drv_ver[];
#define ice_for_each_q_vector(vsi, i) \
        for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)

#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \
                                ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX)

#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
                                     ICE_PROMISC_MCAST_TX | \
                                     ICE_PROMISC_UCAST_RX | \
                                     ICE_PROMISC_MCAST_RX | \
                                     ICE_PROMISC_VLAN_TX | \
                                     ICE_PROMISC_VLAN_RX)

#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)

#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
                                     ICE_PROMISC_MCAST_RX | \
                                     ICE_PROMISC_VLAN_TX | \
                                     ICE_PROMISC_VLAN_RX)

struct ice_tc_info {
        u16 qoffset;
        u16 qcount_tx;

@@ -123,7 +154,7 @@ struct ice_tc_info {

struct ice_tc_cfg {
        u8 numtc; /* Total number of enabled TCs */
        u8 ena_tc; /* TX map */
        u8 ena_tc; /* Tx map */
        struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};

@@ -134,7 +165,7 @@ struct ice_res_tracker {
};

struct ice_qs_cfg {
        struct mutex *qs_mutex; /* will be assgined to &pf->avail_q_mutex */
        struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
        unsigned long *pf_map;
        unsigned long pf_map_size;
        unsigned int q_count;

@@ -224,6 +255,8 @@ struct ice_vsi {

        s16 vf_id; /* VF ID for SR-IOV VSIs */

        u16 ethtype; /* Ethernet protocol for pause frame */

        /* RSS config */
        u16 rss_table_size; /* HW RSS table size */
        u16 rss_size; /* Allocated RSS queues */

@@ -247,6 +280,7 @@ struct ice_vsi {
        u8 irqs_ready;
        u8 current_isup; /* Sync 'link up' logging */
        u8 stat_offsets_loaded;
        u8 vlan_ena;

        /* queue information */
        u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */

@@ -257,26 +291,34 @@ struct ice_vsi {
        u16 num_txq; /* Used Tx queues */
        u16 alloc_rxq; /* Allocated Rx queues */
        u16 num_rxq; /* Used Rx queues */
        u16 num_desc;
        u16 num_rx_desc;
        u16 num_tx_desc;
        struct ice_tc_cfg tc_cfg;
} ____cacheline_internodealigned_in_smp;

/* struct that defines an interrupt vector */
struct ice_q_vector {
        struct ice_vsi *vsi;
        cpumask_t affinity_mask;
        struct napi_struct napi;
        struct ice_ring_container rx;
        struct ice_ring_container tx;
        struct irq_affinity_notify affinity_notify;

        u16 v_idx; /* index in the vsi->q_vector array. */
        u8 num_ring_tx; /* total number of Tx rings in vector */
        u16 reg_idx;
        u8 num_ring_rx; /* total number of Rx rings in vector */
        char name[ICE_INT_NAME_STR_LEN];
        u8 num_ring_tx; /* total number of Tx rings in vector */
        u8 itr_countdown; /* when 0 should adjust adaptive ITR */
        /* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
         * value to the device
         */
        u8 intrl;

        struct napi_struct napi;

        struct ice_ring_container rx;
        struct ice_ring_container tx;

        cpumask_t affinity_mask;
        struct irq_affinity_notify affinity_notify;

        char name[ICE_INT_NAME_STR_LEN];
} ____cacheline_internodealigned_in_smp;

enum ice_pf_flags {

@@ -285,7 +327,11 @@ enum ice_pf_flags {
        ICE_FLAG_RSS_ENA,
        ICE_FLAG_SRIOV_ENA,
        ICE_FLAG_SRIOV_CAPABLE,
        ICE_FLAG_DCB_CAPABLE,
        ICE_FLAG_DCB_ENA,
        ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
        ICE_FLAG_DISABLE_FW_LLDP,
        ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
        ICE_PF_FLAGS_NBITS /* must be last */
};

@@ -324,8 +370,8 @@ struct ice_pf {
        u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
        u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
        u32 num_lan_msix; /* Total MSIX vectors for base driver */
        u16 num_lan_tx; /* num lan Tx queues setup */
        u16 num_lan_rx; /* num lan Rx queues setup */
        u16 num_lan_tx; /* num LAN Tx queues setup */
        u16 num_lan_rx; /* num LAN Rx queues setup */
        u16 q_left_tx; /* remaining num Tx queues left unclaimed */
        u16 q_left_rx; /* remaining num Rx queues left unclaimed */
        u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */

@@ -339,6 +385,9 @@ struct ice_pf {
        struct ice_hw_port_stats stats_prev;
        struct ice_hw hw;
        u8 stat_prev_loaded; /* has previous stats been loaded */
#ifdef CONFIG_DCB
        u16 dcbx_cap;
#endif /* CONFIG_DCB */
        u32 tx_timeout_count;
        unsigned long tx_timeout_last_recovery;
        u32 tx_timeout_recovery_level;

@@ -351,14 +400,15 @@ struct ice_netdev_priv {

/**
 * ice_irq_dynamic_ena - Enable default interrupt generation settings
 * @hw: pointer to hw struct
 * @vsi: pointer to vsi struct, can be NULL
 * @hw: pointer to HW struct
 * @vsi: pointer to VSI struct, can be NULL
 * @q_vector: pointer to q_vector, can be NULL
 */
static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
                                       struct ice_q_vector *q_vector)
static inline void
ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
                    struct ice_q_vector *q_vector)
{
        u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx :
        u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
                        ((struct ice_pf *)hw->back)->hw_oicr_idx;
        int itr = ICE_ITR_NONE;
        u32 val;

@@ -374,10 +424,24 @@ static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
        wr32(hw, GLINT_DYN_CTL(vector), val);
}
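/* Editor's note: with both a VSI and a q_vector supplied, the write above
 * targets the vector's own HW register index (q_vector->reg_idx after this
 * change, previously vsi->hw_base_vector + v_idx); otherwise it falls back
 * to the PF's "other interrupt cause" vector, hw_oicr_idx.
 */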
static inline void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
/**
 * ice_find_vsi_by_type - Find and return VSI of a given type
 * @pf: PF to search for VSI
 * @type: Value indicating type of VSI we are looking for
 */
static inline struct ice_vsi *
ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
        vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
        vsi->tc_cfg.numtc = 1;
        int i;

        for (i = 0; i < pf->num_alloc_vsi; i++) {
                struct ice_vsi *vsi = pf->vsi[i];

                if (vsi && vsi->type == type)
                        return vsi;
        }

        return NULL;
}
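/* Hypothetical usage sketch, not from the patch: look up the PF's own VSI
 * with struct ice_vsi *pf_vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
 * callers must handle a NULL return when no VSI of that type exists.
 */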
void ice_set_ethtool_ops(struct net_device *netdev);

@@ -388,5 +452,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
void ice_napi_del(struct ice_vsi *vsi);
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
#endif /* CONFIG_DCB */

#endif /* _ICE_H_ */

@@ -62,7 +62,7 @@ struct ice_aqc_req_res {
#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
        /* For SDP: pin id of the SDP */
        /* For SDP: pin ID of the SDP */
        __le32 res_number;
        /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
        __le16 status;

@@ -747,6 +747,32 @@ struct ice_aqc_delete_elem {
        __le32 teid[1];
};

/* Query Port ETS (indirect 0x040E)
 *
 * This indirect command is used to query port TC node configuration.
 */
struct ice_aqc_query_port_ets {
        __le32 port_teid;
        __le32 reserved;
        __le32 addr_high;
        __le32 addr_low;
};

struct ice_aqc_port_ets_elem {
        u8 tc_valid_bits;
        u8 reserved[3];
        /* 3 bits for UP per TC 0-7, 4th byte reserved */
        __le32 up2tc;
        u8 tc_bw_share[8];
        __le32 port_eir_prof_id;
        __le32 port_cir_prof_id;
        /* 3 bits per Node priority to TC 0-7, 4th byte reserved */
        __le32 tc_node_prio;
#define ICE_TC_NODE_PRIO_S 0x4
        u8 reserved1[4];
        __le32 tc_node_teid[8]; /* Used for response, reserved in command */
};
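/* Editor's note on unpacking up2tc: with 3 bits per user priority (UP),
 * the TC for UP n would be extracted as, for example,
 *      u8 tc = (le32_to_cpu(up2tc) >> (n * 3)) & 0x7;
 * This decoding sketch follows the field comment above; it is not code
 * from the patch itself.
 */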
/* Query Scheduler Resource Allocation (indirect 0x0412)
 * This indirect command retrieves the scheduler resources allocated by
 * EMP Firmware to the given PF.

@@ -953,8 +979,8 @@ struct ice_aqc_set_phy_cfg_data {
        __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
        __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
        u8 caps;
#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
#define ICE_AQ_PHY_ENA_VALID_MASK ICE_M(0xef, 0)
#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
#define ICE_AQ_PHY_ENA_LINK BIT(3)
#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)

@@ -1023,7 +1050,7 @@ struct ice_aqc_get_link_status_data {
        u8 ext_info;
#define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0)
#define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
        /* Port TX Suspended */
        /* Port Tx Suspended */
#define ICE_AQ_LINK_TX_S 2
#define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S)
#define ICE_AQ_LINK_TX_ACTIVE 0

@@ -1119,9 +1146,9 @@ struct ice_aqc_nvm {
};

/**
 * Send to PF command (indirect 0x0801) id is only used by PF
 * Send to PF command (indirect 0x0801) ID is only used by PF
 *
 * Send to VF command (indirect 0x0802) id is only used by PF
 * Send to VF command (indirect 0x0802) ID is only used by PF
 *
 */
struct ice_aqc_pf_vf_msg {

@@ -1131,6 +1158,126 @@ struct ice_aqc_pf_vf_msg {
        __le32 addr_low;
};

/* Get LLDP MIB (indirect 0x0A00)
 * Note: This is also used by the LLDP MIB Change Event (0x0A01)
 * as the format is the same.
 */
struct ice_aqc_lldp_get_mib {
        u8 type;
#define ICE_AQ_LLDP_MIB_TYPE_S 0
#define ICE_AQ_LLDP_MIB_TYPE_M (0x3 << ICE_AQ_LLDP_MIB_TYPE_S)
#define ICE_AQ_LLDP_MIB_LOCAL 0
#define ICE_AQ_LLDP_MIB_REMOTE 1
#define ICE_AQ_LLDP_MIB_LOCAL_AND_REMOTE 2
#define ICE_AQ_LLDP_BRID_TYPE_S 2
#define ICE_AQ_LLDP_BRID_TYPE_M (0x3 << ICE_AQ_LLDP_BRID_TYPE_S)
#define ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID 0
#define ICE_AQ_LLDP_BRID_TYPE_NON_TPMR 1
        /* Tx pause flags in the 0xA01 event use ICE_AQ_LLDP_TX_* */
#define ICE_AQ_LLDP_TX_S 0x4
#define ICE_AQ_LLDP_TX_M (0x03 << ICE_AQ_LLDP_TX_S)
#define ICE_AQ_LLDP_TX_ACTIVE 0
#define ICE_AQ_LLDP_TX_SUSPENDED 1
#define ICE_AQ_LLDP_TX_FLUSHED 3
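/* Editor's note: a decoding sketch based on the shift/mask macros above,
 * not code from the patch itself:
 *      u8 mib_type  = (type & ICE_AQ_LLDP_MIB_TYPE_M) >> ICE_AQ_LLDP_MIB_TYPE_S;
 *      u8 brid_type = (type & ICE_AQ_LLDP_BRID_TYPE_M) >> ICE_AQ_LLDP_BRID_TYPE_S;
 *      u8 tx_state  = (type & ICE_AQ_LLDP_TX_M) >> ICE_AQ_LLDP_TX_S;
 * where tx_state is meaningful only in the 0x0A01 event, per the comment
 * above ICE_AQ_LLDP_TX_S.
 */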
        /* The following bytes are reserved for the Get LLDP MIB command (0x0A00)
         * and in the LLDP MIB Change Event (0x0A01). They are valid for the
         * Get LLDP MIB (0x0A00) response only.
         */
        u8 reserved1;
        __le16 local_len;
        __le16 remote_len;
        u8 reserved2[2];
        __le32 addr_high;
        __le32 addr_low;
};

/* Configure LLDP MIB Change Event (direct 0x0A01) */
/* For MIB Change Event use ice_aqc_lldp_get_mib structure above */
struct ice_aqc_lldp_set_mib_change {
        u8 command;
#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
        u8 reserved[15];
};

/* Stop LLDP (direct 0x0A05) */
struct ice_aqc_lldp_stop {
        u8 command;
#define ICE_AQ_LLDP_AGENT_STATE_MASK BIT(0)
#define ICE_AQ_LLDP_AGENT_STOP 0x0
#define ICE_AQ_LLDP_AGENT_SHUTDOWN ICE_AQ_LLDP_AGENT_STATE_MASK
#define ICE_AQ_LLDP_AGENT_PERSIST_DIS BIT(1)
        u8 reserved[15];
};

/* Start LLDP (direct 0x0A06) */
struct ice_aqc_lldp_start {
        u8 command;
#define ICE_AQ_LLDP_AGENT_START BIT(0)
#define ICE_AQ_LLDP_AGENT_PERSIST_ENA BIT(1)
        u8 reserved[15];
};

/* Get CEE DCBX Oper Config (0x0A07)
 * The command uses the generic descriptor struct and
 * returns the struct below as an indirect response.
 */
struct ice_aqc_get_cee_dcb_cfg_resp {
        u8 oper_num_tc;
        u8 oper_prio_tc[4];
        u8 oper_tc_bw[8];
        u8 oper_pfc_en;
        __le16 oper_app_prio;
#define ICE_AQC_CEE_APP_FCOE_S 0
#define ICE_AQC_CEE_APP_FCOE_M (0x7 << ICE_AQC_CEE_APP_FCOE_S)
#define ICE_AQC_CEE_APP_ISCSI_S 3
#define ICE_AQC_CEE_APP_ISCSI_M (0x7 << ICE_AQC_CEE_APP_ISCSI_S)
#define ICE_AQC_CEE_APP_FIP_S 8
#define ICE_AQC_CEE_APP_FIP_M (0x7 << ICE_AQC_CEE_APP_FIP_S)
        __le32 tlv_status;
#define ICE_AQC_CEE_PG_STATUS_S 0
#define ICE_AQC_CEE_PG_STATUS_M (0x7 << ICE_AQC_CEE_PG_STATUS_S)
#define ICE_AQC_CEE_PFC_STATUS_S 3
#define ICE_AQC_CEE_PFC_STATUS_M (0x7 << ICE_AQC_CEE_PFC_STATUS_S)
#define ICE_AQC_CEE_FCOE_STATUS_S 8
#define ICE_AQC_CEE_FCOE_STATUS_M (0x7 << ICE_AQC_CEE_FCOE_STATUS_S)
#define ICE_AQC_CEE_ISCSI_STATUS_S 11
#define ICE_AQC_CEE_ISCSI_STATUS_M (0x7 << ICE_AQC_CEE_ISCSI_STATUS_S)
#define ICE_AQC_CEE_FIP_STATUS_S 16
#define ICE_AQC_CEE_FIP_STATUS_M (0x7 << ICE_AQC_CEE_FIP_STATUS_S)
        u8 reserved[12];
};

/* Set Local LLDP MIB (indirect 0x0A08)
 * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
 */
struct ice_aqc_lldp_set_local_mib {
        u8 type;
#define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0)
#define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0
#define SET_LOCAL_MIB_TYPE_CEE_M BIT(1)
#define SET_LOCAL_MIB_TYPE_CEE_WILLING 0
#define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M
        u8 reserved0;
        __le16 length;
        u8 reserved1[4];
        __le32 addr_high;
        __le32 addr_low;
};

/* Stop/Start LLDP Agent (direct 0x0A09)
 * Used for stopping/starting specific LLDP agent. e.g. DCBx.
 * The same structure is used for the response, with the command field
 * being used as the status field.
 */
struct ice_aqc_lldp_stop_start_specific_agent {
        u8 command;
#define ICE_AQC_START_STOP_AGENT_M BIT(0)
#define ICE_AQC_START_STOP_AGENT_STOP_DCBX 0
#define ICE_AQC_START_STOP_AGENT_START_DCBX ICE_AQC_START_STOP_AGENT_M
        u8 reserved[15];
};

/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)

@@ -1144,6 +1291,9 @@ struct ice_aqc_get_set_rss_key {

#define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28
#define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC
#define ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE \
        (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + \
         ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE)
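/* Editor's note: 0x28 is 40 bytes of standard RSS key and 0xC is 12 bytes
 * of extended hash key, so ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE comes out
 * to 52 bytes.
 */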
struct ice_aqc_get_set_rss_keys {
        u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];

@@ -1185,7 +1335,7 @@ struct ice_aqc_get_set_rss_lut {
        __le32 addr_low;
};

/* Add TX LAN Queues (indirect 0x0C30) */
/* Add Tx LAN Queues (indirect 0x0C30) */
struct ice_aqc_add_txqs {
        u8 num_qgrps;
        u8 reserved[3];

@@ -1194,7 +1344,7 @@ struct ice_aqc_add_txqs {
        __le32 addr_low;
};

/* This is the descriptor of each queue entry for the Add TX LAN Queues
/* This is the descriptor of each queue entry for the Add Tx LAN Queues
 * command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp.
 */
struct ice_aqc_add_txqs_perq {

@@ -1206,7 +1356,7 @@ struct ice_aqc_add_txqs_perq {
        struct ice_aqc_txsched_elem info;
};

/* The format of the command buffer for Add TX LAN Queues (0x0C30)
/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
 * is an array of the following structs. Please note that the length of
 * each struct ice_aqc_add_tx_qgrp is variable due
 * to the variable number of queues in each group!

@@ -1218,7 +1368,7 @@ struct ice_aqc_add_tx_qgrp {
        struct ice_aqc_add_txqs_perq txqs[1];
};

/* Disable TX LAN Queues (indirect 0x0C31) */
/* Disable Tx LAN Queues (indirect 0x0C31) */
struct ice_aqc_dis_txqs {
        u8 cmd_type;
#define ICE_AQC_Q_DIS_CMD_S 0

@@ -1240,7 +1390,7 @@ struct ice_aqc_dis_txqs {
        __le32 addr_low;
};

/* The buffer for Disable TX LAN Queues (indirect 0x0C31)
/* The buffer for Disable Tx LAN Queues (indirect 0x0C31)
 * contains the following structures, arrayed one after the
 * other.
 * Note: Since the q_id is 16 bits wide, if the

@@ -1387,8 +1537,15 @@ struct ice_aq_desc {
        struct ice_aqc_get_topo get_topo;
        struct ice_aqc_sched_elem_cmd sched_elem_cmd;
        struct ice_aqc_query_txsched_res query_sched_res;
        struct ice_aqc_query_port_ets port_ets;
        struct ice_aqc_nvm nvm;
        struct ice_aqc_pf_vf_msg virt;
        struct ice_aqc_lldp_get_mib lldp_get_mib;
        struct ice_aqc_lldp_set_mib_change lldp_set_event;
        struct ice_aqc_lldp_stop lldp_stop;
        struct ice_aqc_lldp_start lldp_start;
        struct ice_aqc_lldp_set_local_mib lldp_set_mib;
        struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
        struct ice_aqc_get_set_rss_lut get_set_rss_lut;
        struct ice_aqc_get_set_rss_key get_set_rss_key;
        struct ice_aqc_add_txqs add_txqs;

@@ -1421,6 +1578,8 @@ struct ice_aq_desc {

/* error codes */
enum ice_aq_err {
        ICE_AQ_RC_OK = 0, /* Success */
        ICE_AQ_RC_EPERM = 1, /* Operation not permitted */
        ICE_AQ_RC_ENOENT = 2, /* No such element */
        ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
        ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
        ICE_AQ_RC_EEXIST = 13, /* Object already exists */

@@ -1473,6 +1632,7 @@ enum ice_adminq_opc {
        ice_aqc_opc_get_sched_elems = 0x0404,
        ice_aqc_opc_suspend_sched_elems = 0x0409,
        ice_aqc_opc_resume_sched_elems = 0x040A,
        ice_aqc_opc_query_port_ets = 0x040E,
        ice_aqc_opc_delete_sched_elems = 0x040F,
        ice_aqc_opc_query_sched_res = 0x0412,

@@ -1490,6 +1650,14 @@ enum ice_adminq_opc {
        /* PF/VF mailbox commands */
        ice_mbx_opc_send_msg_to_pf = 0x0801,
        ice_mbx_opc_send_msg_to_vf = 0x0802,
        /* LLDP commands */
        ice_aqc_opc_lldp_get_mib = 0x0A00,
        ice_aqc_opc_lldp_set_mib_change = 0x0A01,
        ice_aqc_opc_lldp_stop = 0x0A05,
        ice_aqc_opc_lldp_start = 0x0A06,
        ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
        ice_aqc_opc_lldp_set_local_mib = 0x0A08,
        ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,

        /* RSS commands */
        ice_aqc_opc_set_rss_key = 0x0B02,

@@ -1497,7 +1665,7 @@ enum ice_adminq_opc {
        ice_aqc_opc_get_rss_key = 0x0B04,
        ice_aqc_opc_get_rss_lut = 0x0B05,

        /* TX queue handling commands/events */
        /* Tx queue handling commands/events */
        ice_aqc_opc_add_txqs = 0x0C30,
        ice_aqc_opc_dis_txqs = 0x0C31,
@@ -31,7 +31,7 @@
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{

@@ -77,7 +77,7 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL

@@ -262,7 +262,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
static enum ice_status
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
                     struct ice_link_status *link, struct ice_sq_cd *cd)
{

@@ -331,7 +331,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
        /* flag cleared so calling functions don't call AQ again */
        pi->phy.get_link_info = false;

        return status;
        return 0;
}

/**

@@ -358,22 +358,22 @@ static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
         */
        case ICE_RXDID_FLEX_NIC:
        case ICE_RXDID_FLEX_NIC_2:
                ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
                                   ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
                                   ICE_RXFLG_FIN, idx++);
                ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
                                   ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
                                   ICE_FLG_FIN, idx++);
                /* flex flag 1 is not used for flexi-flag programming, skipping
                 * these four FLG64 bits.
                 */
                ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
                                   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
                ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
                                   ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
                                   ICE_RXFLG_EVLAN_x9100, idx++);
                ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
                                   ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
                                   ICE_RXFLG_TNL0, idx++);
                ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
                                   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
                ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
                                   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
                ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
                                   ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
                                   ICE_FLG_EVLAN_x9100, idx++);
                ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
                                   ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
                                   ICE_FLG_TNL0, idx++);
                ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
                                   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
                break;

        default:

@@ -418,7 +418,7 @@ static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{

@@ -438,7 +438,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{

@@ -477,7 +477,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART

@@ -626,7 +626,7 @@ out:

/**
 * ice_output_fw_log
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *

@@ -642,12 +642,12 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)

/**
 * ice_get_itr_intrl_gran - determine int/intrl granularity
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 *
 * Determines the itr/intrl granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
        u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
                         GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>

@@ -664,13 +664,7 @@ static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
                hw->itr_gran = ICE_ITR_GRAN_MAX_25;
                hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
                break;
        default:
                ice_debug(hw, ICE_DBG_INIT,
                          "Failed to determine itr/intrl granularity\n");
                return ICE_ERR_CFG;
        }

        return 0;
}

/**

@@ -697,9 +691,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
        if (status)
                return status;

        status = ice_get_itr_intrl_gran(hw);
        if (status)
                return status;
        ice_get_itr_intrl_gran(hw);

        status = ice_init_all_ctrlq(hw);
        if (status)

@@ -731,7 +723,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
                goto err_unroll_cqinit;
        }

        /* set the back pointer to hw */
        /* set the back pointer to HW */
        hw->port_info->hw = hw;

        /* Initialize port_info struct with switch configuration data */
@@ -988,7 +980,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to hw register space
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)

@@ -1001,7 +993,7 @@ ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
        if (rxq_index > QRX_CTRL_MAX_INDEX)
                return ICE_ERR_PARAM;

        /* Copy each dword separately to hw */
        /* Copy each dword separately to HW */
        for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
                wr32(hw, QRX_CONTEXT(i, rxq_index),
                     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

@@ -1045,7 +1037,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to hw register space
 * it to HW register space
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,

@@ -1100,8 +1092,9 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
 *
 * Dumps debug log about control command with descriptor contents.
 */
void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
                  void *buf, u16 buf_len)
void
ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
             u16 buf_len)
{
        struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
        u16 len;

@@ -1143,7 +1136,7 @@ void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)

@@ -1160,7 +1153,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands

@@ -1194,7 +1187,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether

@@ -1217,8 +1210,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)

/**
 * ice_aq_req_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource

@@ -1303,8 +1296,8 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,

/**
 * ice_aq_release_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *

@@ -1330,7 +1323,7 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *

@@ -1392,7 +1385,7 @@ ice_acquire_res_exit:
/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */

@@ -1404,7 +1397,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
        status = ice_aq_release_res(hw, res, 0, NULL);

        /* there are some rare cases when trying to release the resource
         * results in an admin Q timeout, so handle them correctly
         * results in an admin queue timeout, so handle them correctly
         */
        while ((status == ICE_ERR_AQ_TIMEOUT) &&
               (total_delay < hw->adminq.sq_cmd_timeout)) {

@@ -1415,13 +1408,15 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}

/**
 * ice_get_guar_num_vsi - determine number of guar VSI for a PF
 * @hw: pointer to the hw structure
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of VSI per PF.
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
        u8 funcs;

@@ -1432,12 +1427,12 @@ static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
        if (!funcs)
                return 0;

        return ICE_MAX_VSI / funcs;
        return max / funcs;
}
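/* Editor's note, a worked example assuming ICE_MAX_VSI is 768 in this
 * driver: with 4 PFs present in the parsed capability bitmap,
 * ice_get_num_per_func(hw, ICE_MAX_VSI) returns 768 / 4 = 192 guaranteed
 * VSIs per PF.
 */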
/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse

@@ -1512,7 +1507,8 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
                                  "HW caps: Dev.VSI cnt = %d\n",
                                  dev_p->num_vsi_allocd_to_host);
                } else if (func_p) {
                        func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
                        func_p->guar_num_vsi =
                                ice_get_num_per_func(hw, ICE_MAX_VSI);
                        ice_debug(hw, ICE_DBG_INIT,
                                  "HW caps: Func.VSI cnt = %d\n",
                                  number);

@@ -1578,7 +1574,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,

/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @cap_count: cap count needed if AQ err==ENOMEM

@@ -1617,8 +1613,8 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
 * @hw: pointer to the hardware structure
 * @opc: capabilities type to discover - pass in the command opcode
 */
static enum ice_status ice_discover_caps(struct ice_hw *hw,
                                         enum ice_adminq_opc opc)
static enum ice_status
ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
{
        enum ice_status status;
        u32 cap_count;

@@ -1677,7 +1673,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)

/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL

@@ -1705,7 +1701,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,

/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */

@@ -1721,7 +1717,7 @@ static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.

@@ -1737,10 +1733,10 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
 * @phy_type_low: lower part of phy_type
 * @phy_type_high: higher part of phy_type
 *
 * This helper function will convert an entry in phy type structure
 * This helper function will convert an entry in PHY type structure
 * [phy_type_low, phy_type_high] to its corresponding link speed.
 * Note: In the structure of [phy_type_low, phy_type_high], there should
 * be one bit set, as this function will convert one phy type to its
 * be one bit set, as this function will convert one PHY type to its
 * speed.
 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned

@@ -1884,10 +1880,10 @@ void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
                    u16 link_speeds_bitmap)
{
        u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
        u64 pt_high;
        u64 pt_low;
        int index;
        u16 speed;

        /* We first check with low part of phy_type */
        for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {

@@ -1910,7 +1906,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL

@@ -1929,6 +1925,15 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
        if (!cfg)
                return ICE_ERR_PARAM;

        /* Ensure that only valid bits of cfg->caps can be turned on. */
        if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
                ice_debug(hw, ICE_DBG_PHY,
                          "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
                          cfg->caps);

                cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
        }
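/* Editor's note: assuming ICE_M(m, s) expands to ((m) << (s)),
 * ICE_AQ_PHY_ENA_VALID_MASK is 0xef, i.e. every caps bit except bit 4 is
 * valid; e.g. caps = 0x1f would be logged and masked down to 0x0f here.
 */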
        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
        desc.params.set_phy.lport_num = lport;
        desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

@@ -2016,7 +2021,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
        if (!pcaps)
                return ICE_ERR_NO_MEMORY;

        /* Get the current phy config */
        /* Get the current PHY config */
        status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
                                     NULL);
        if (status) {

@@ -2027,8 +2032,10 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
        /* clear the old pause settings */
        cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
                                   ICE_AQC_PHY_EN_RX_LINK_PAUSE);

        /* set the new capabilities */
        cfg.caps |= pause_mask;

        /* If the capabilities have changed, then set the new config */
        if (cfg.caps != pcaps->caps) {
                int retry_count, retry_max = 10;

@@ -2135,6 +2142,32 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
        return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
                      struct ice_sq_cd *cd)
{
        struct ice_aqc_set_event_mask *cmd;
        struct ice_aq_desc desc;

        cmd = &desc.params.set_event_mask;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

        cmd->lport_num = port_num;

        cmd->event_mask = cpu_to_le16(mask);
        return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information

@@ -2297,7 +2330,7 @@ ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,

/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key

@@ -2332,7 +2365,7 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *

@@ -2351,7 +2384,7 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *

@@ -2436,7 +2469,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the RST source
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *

@@ -2476,7 +2509,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
                break;
        case ICE_VF_RESET:
                cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
                /* In this case, FW expects vmvf_num to be absolute VF id */
                /* In this case, FW expects vmvf_num to be absolute VF ID */
                cmd->vmvf_and_timeout |=
                        cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
                                    ICE_AQC_Q_DIS_VMVF_NUM_M);

@@ -2534,8 +2567,8 @@ do_aq:
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
                           const struct ice_ctx_ele *ce_info)
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
        u8 src_byte, dest_byte, mask;
        u8 *from, *dest;

@@ -2573,8 +2606,8 @@ static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
                           const struct ice_ctx_ele *ce_info)
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
        u16 src_word, mask;
        __le16 dest_word;

@@ -2616,8 +2649,8 @@ static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
                            const struct ice_ctx_ele *ce_info)
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
        u32 src_dword, mask;
        __le32 dest_dword;

@@ -2667,8 +2700,8 @@ static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
                            const struct ice_ctx_ele *ce_info)
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
        u64 src_qword, mask;
        __le64 dest_qword;

@@ -2749,25 +2782,51 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
        return 0;
}

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
static struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
        struct ice_vsi_ctx *vsi;
        struct ice_q_ctx *q_ctx;

        vsi = ice_get_vsi_ctx(hw, vsi_handle);
        if (!vsi)
                return NULL;
        if (q_handle >= vsi->num_lan_q_entries[tc])
                return NULL;
        if (!vsi->lan_q_ctx[tc])
                return NULL;
        q_ctx = vsi->lan_q_ctx[tc];
        return &q_ctx[q_handle];
}
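/* Editor's note: the lookup validates the VSI handle, the per-TC entry
 * count and the per-TC context array before indexing, so callers such as
 * ice_ena_vsi_txq/ice_dis_vsi_txq below can treat a NULL return simply as
 * "no such software queue handle".
 */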
/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: tc number
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one lan q
 * This function adds one LAN queue
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
                struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
                u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
                struct ice_sq_cd *cd)
{
        struct ice_aqc_txsched_elem_data node = { 0 };
        struct ice_sched_node *parent;
        struct ice_q_ctx *q_ctx;
        enum ice_status status;
        struct ice_hw *hw;

@@ -2784,6 +2843,14 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,

        mutex_lock(&pi->sched_lock);

        q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
        if (!q_ctx) {
                ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
                          q_handle);
                status = ICE_ERR_PARAM;
                goto ena_txq_exit;
        }

        /* find a parent node */
        parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
                                            ICE_SCHED_NODE_OWNER_LAN);

@@ -2803,14 +2870,14 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
         * Bit 5-6.
         * - Bit 7 is reserved.
         * Without setting the generic section as valid in valid_sections, the
         * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
         * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
         */
        buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;

        /* add the lan q */
        /* add the LAN queue */
        status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
        if (status) {
                ice_debug(hw, ICE_DBG_SCHED, "enable Q %d failed %d\n",
                ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
                          le16_to_cpu(buf->txqs[0].txq_id),
                          hw->adminq.sq_last_status);
                goto ena_txq_exit;

@@ -2818,8 +2885,9 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,

        node.node_teid = buf->txqs[0].q_teid;
        node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
        q_ctx->q_handle = q_handle;

        /* add a leaf node into schduler tree q layer */
        /* add a leaf node into schduler tree queue layer */
        status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);

ena_txq_exit:

@@ -2830,35 +2898,43 @@ ena_txq_exit:
/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the RST source
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
                u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
                u16 *q_handles, u16 *q_ids, u32 *q_teids,
                enum ice_disq_rst_src rst_src, u16 vmvf_num,
                struct ice_sq_cd *cd)
{
        enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
        struct ice_aqc_dis_txq_item qg_list;
        struct ice_q_ctx *q_ctx;
        u16 i;

        if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
                return ICE_ERR_CFG;

        /* if queue is disabled already yet the disable queue command has to be
         * sent to complete the VF reset, then call ice_aq_dis_lan_txq without
         * any queue information
         */

        if (!num_queues && rst_src)
                return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
                                          NULL);
        if (!num_queues) {
                /* if queue is disabled already yet the disable queue command
                 * has to be sent to complete the VF reset, then call
                 * ice_aq_dis_lan_txq without any queue information
                 */
                if (rst_src)
                        return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
                                                  vmvf_num, NULL);
                return ICE_ERR_CFG;
        }

        mutex_lock(&pi->sched_lock);

@@ -2868,6 +2944,17 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
                node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
                if (!node)
                        continue;
                q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
                if (!q_ctx) {
                        ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
                                  q_handles[i]);
                        continue;
                }
                if (q_ctx->q_handle != q_handles[i]) {
                        ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
                                  q_ctx->q_handle, q_handles[i]);
                        continue;
                }
                qg_list.parent_teid = node->info.parent_teid;
                qg_list.num_qs = 1;
                qg_list.q_id[0] = cpu_to_le16(q_ids[i]);

@@ -2878,18 +2965,19 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
                if (status)
                        break;
                ice_free_sched_node(pi, node);
                q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
        }
        mutex_unlock(&pi->sched_lock);
        return status;
}
|
||||
|
||||
/**
|
||||
* ice_cfg_vsi_qs - configure the new/exisiting VSI queues
|
||||
* ice_cfg_vsi_qs - configure the new/existing VSI queues
|
||||
* @pi: port information structure
|
||||
* @vsi_handle: software VSI handle
|
||||
* @tc_bitmap: TC bitmap
|
||||
* @maxqs: max queues array per TC
|
||||
* @owner: lan or rdma
|
||||
* @owner: LAN or RDMA
|
||||
*
|
||||
* This function adds/updates the VSI queues per TC.
|
||||
*/
|
||||
@@ -2908,7 +2996,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
|
||||
|
||||
mutex_lock(&pi->sched_lock);
|
||||
|
||||
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
|
||||
ice_for_each_traffic_class(i) {
|
||||
/* configuration is possible only if TC node is present */
|
||||
if (!ice_sched_get_tc_node(pi, i))
|
||||
continue;
|
||||
@@ -2924,13 +3012,13 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_cfg_vsi_lan - configure VSI lan queues
|
||||
* ice_cfg_vsi_lan - configure VSI LAN queues
|
||||
* @pi: port information structure
|
||||
* @vsi_handle: software VSI handle
|
||||
* @tc_bitmap: TC bitmap
|
||||
* @max_lanqs: max lan queues array per TC
|
||||
* @max_lanqs: max LAN queues array per TC
|
||||
*
|
||||
* This function adds/updates the VSI lan queues per TC.
|
||||
* This function adds/updates the VSI LAN queues per TC.
|
||||
*/
|
||||
enum ice_status
|
||||
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
|
||||
@@ -2942,7 +3030,7 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
|
||||
|
||||
/**
|
||||
* ice_replay_pre_init - replay pre initialization
|
||||
* @hw: pointer to the hw struct
|
||||
* @hw: pointer to the HW struct
|
||||
*
|
||||
* Initializes required config data for VSI, FD, ACL, and RSS before replay.
|
||||
*/
|
||||
@@ -2966,7 +3054,7 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
|
||||
|
||||
/**
|
||||
* ice_replay_vsi - replay VSI configuration
|
||||
* @hw: pointer to the hw struct
|
||||
* @hw: pointer to the HW struct
|
||||
* @vsi_handle: driver VSI handle
|
||||
*
|
||||
* Restore all VSI configuration after reset. It is required to call this
|
||||
@@ -2993,7 +3081,7 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
|
||||
|
||||
/**
|
||||
* ice_replay_post - post replay configuration cleanup
|
||||
* @hw: pointer to the hw struct
|
||||
* @hw: pointer to the HW struct
|
||||
*
|
||||
* Post replay cleanup.
|
||||
*/
|
||||
@@ -3012,8 +3100,9 @@ void ice_replay_post(struct ice_hw *hw)
|
||||
* @prev_stat: ptr to previous loaded stat value
|
||||
* @cur_stat: ptr to current stat value
|
||||
*/
|
||||
void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
|
||||
bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
|
||||
void
|
||||
ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
|
||||
bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
|
||||
{
|
||||
u64 new_data;
|
||||
|
||||
@@ -3043,8 +3132,9 @@ void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
|
||||
* @prev_stat: ptr to previous loaded stat value
|
||||
* @cur_stat: ptr to current stat value
|
||||
*/
|
||||
void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
|
||||
u64 *prev_stat, u64 *cur_stat)
|
||||
void
|
||||
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
|
||||
u64 *prev_stat, u64 *cur_stat)
|
||||
{
|
||||
u32 new_data;
|
||||
|
||||
@@ -3063,3 +3153,28 @@ void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
|
||||
/* to manage the potential roll-over */
|
||||
*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
|
||||
}
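The roll-over handling above is easier to see in isolation: the hardware counter is only 32 bits wide, so when a fresh reading comes back below the previous one the counter has wrapped, and BIT_ULL(32) must be added before subtracting to get the true delta. A minimal standalone sketch of the same arithmetic, with a hypothetical wrap32_delta helper that is not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the ice_stat_update32 arithmetic: if the new 32-bit reading
 * is below the previous one, the HW counter rolled over, so widen the
 * new value by 2^32 before computing the difference.
 */
static uint64_t wrap32_delta(uint64_t prev, uint32_t new_data)
{
	uint64_t prev32 = prev & 0xFFFFFFFFULL;

	if (new_data >= prev32)
		return new_data - prev32;
	/* manage the potential roll-over */
	return ((uint64_t)new_data + (1ULL << 32)) - prev32;
}

int main(void)
{
	/* counter read 0xFFFFFFF0, then wraps around to 0x10: delta is 0x20 */
	printf("%llu\n", (unsigned long long)wrap32_delta(0xFFFFFFF0ULL, 0x10));
	return 0;
}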

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_get_elem *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->generic[0].node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

@@ -9,8 +9,8 @@
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>

void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
		  u16 buf_len);
void
ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
@@ -28,8 +28,8 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words,
				u16 *data);
enum ice_status
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
@@ -89,25 +89,37 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd);

enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
		u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cmd_details);
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handle, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd);
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs);
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd);
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
		       bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		       u64 *prev_stat, u64 *cur_stat);
void
ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
		  bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_get_elem *buf);
#endif /* _ICE_COMMON_H_ */

@@ -51,7 +51,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)

/**
 * ice_check_sq_alive
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if Queue is enabled else false.
@@ -287,7 +287,7 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event q)
 * Configure base address and length registers for the receive (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
@@ -751,7 +751,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
@@ -767,7 +767,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
@@ -962,7 +962,7 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)

/**
 * ice_clean_rq_elem
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process

@@ -79,6 +79,7 @@ struct ice_rq_event_info {
/* Control Queue information */
struct ice_ctl_q_info {
	enum ice_ctl_q qtype;
	enum ice_aq_err rq_last_status;	/* last status on receive queue */
	struct ice_ctl_q_ring rq;	/* receive queue */
	struct ice_ctl_q_ring sq;	/* send queue */
	u32 sq_cmd_timeout;	/* send queue cmd write back timeout */
@@ -86,10 +87,9 @@ struct ice_ctl_q_info {
	u16 num_sq_entries;	/* send queue depth */
	u16 rq_buf_size;	/* receive queue buffer size */
	u16 sq_buf_size;	/* send queue buffer size */
	enum ice_aq_err sq_last_status;	/* last status on send queue */
	struct mutex sq_lock;	/* Send queue lock */
	struct mutex rq_lock;	/* Receive queue lock */
	enum ice_aq_err sq_last_status;	/* last status on send queue */
	enum ice_aq_err rq_last_status;	/* last status on receive queue */
};

#endif /* _ICE_CONTROLQ_H_ */

drivers/net/ethernet/intel/ice/ice_dcb.c (new file, 1392 lines; diff suppressed because it is too large)

drivers/net/ethernet/intel/ice/ice_dcb.h (new file, 179 lines)
@@ -0,0 +1,179 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_DCB_H_
#define _ICE_DCB_H_

#include "ice_type.h"

#define ICE_DCBX_STATUS_NOT_STARTED 0
#define ICE_DCBX_STATUS_IN_PROGRESS 1
#define ICE_DCBX_STATUS_DONE 2
#define ICE_DCBX_STATUS_DIS 7

#define ICE_TLV_TYPE_END 0
#define ICE_TLV_TYPE_ORG 127

#define ICE_IEEE_8021QAZ_OUI 0x0080C2
#define ICE_IEEE_SUBTYPE_ETS_CFG 9
#define ICE_IEEE_SUBTYPE_ETS_REC 10
#define ICE_IEEE_SUBTYPE_PFC_CFG 11
#define ICE_IEEE_SUBTYPE_APP_PRI 12

#define ICE_CEE_DCBX_OUI 0x001B21
#define ICE_CEE_DCBX_TYPE 2
#define ICE_CEE_SUBTYPE_PG_CFG 2
#define ICE_CEE_SUBTYPE_PFC_CFG 3
#define ICE_CEE_SUBTYPE_APP_PRI 4
#define ICE_CEE_MAX_FEAT_TYPE 3
/* Defines for LLDP TLV header */
#define ICE_LLDP_TLV_LEN_S 0
#define ICE_LLDP_TLV_LEN_M (0x01FF << ICE_LLDP_TLV_LEN_S)
#define ICE_LLDP_TLV_TYPE_S 9
#define ICE_LLDP_TLV_TYPE_M (0x7F << ICE_LLDP_TLV_TYPE_S)
#define ICE_LLDP_TLV_SUBTYPE_S 0
#define ICE_LLDP_TLV_SUBTYPE_M (0xFF << ICE_LLDP_TLV_SUBTYPE_S)
#define ICE_LLDP_TLV_OUI_S 8
#define ICE_LLDP_TLV_OUI_M (0xFFFFFFUL << ICE_LLDP_TLV_OUI_S)

/* Defines for IEEE ETS TLV */
#define ICE_IEEE_ETS_MAXTC_S 0
#define ICE_IEEE_ETS_MAXTC_M (0x7 << ICE_IEEE_ETS_MAXTC_S)
#define ICE_IEEE_ETS_CBS_S 6
#define ICE_IEEE_ETS_CBS_M BIT(ICE_IEEE_ETS_CBS_S)
#define ICE_IEEE_ETS_WILLING_S 7
#define ICE_IEEE_ETS_WILLING_M BIT(ICE_IEEE_ETS_WILLING_S)
#define ICE_IEEE_ETS_PRIO_0_S 0
#define ICE_IEEE_ETS_PRIO_0_M (0x7 << ICE_IEEE_ETS_PRIO_0_S)
#define ICE_IEEE_ETS_PRIO_1_S 4
#define ICE_IEEE_ETS_PRIO_1_M (0x7 << ICE_IEEE_ETS_PRIO_1_S)
#define ICE_CEE_PGID_PRIO_0_S 0
#define ICE_CEE_PGID_PRIO_0_M (0xF << ICE_CEE_PGID_PRIO_0_S)
#define ICE_CEE_PGID_PRIO_1_S 4
#define ICE_CEE_PGID_PRIO_1_M (0xF << ICE_CEE_PGID_PRIO_1_S)
#define ICE_CEE_PGID_STRICT 15

/* Defines for IEEE TSA types */
#define ICE_IEEE_TSA_STRICT 0
#define ICE_IEEE_TSA_ETS 2

/* Defines for IEEE PFC TLV */
#define ICE_IEEE_PFC_CAP_S 0
#define ICE_IEEE_PFC_CAP_M (0xF << ICE_IEEE_PFC_CAP_S)
#define ICE_IEEE_PFC_MBC_S 6
#define ICE_IEEE_PFC_MBC_M BIT(ICE_IEEE_PFC_MBC_S)
#define ICE_IEEE_PFC_WILLING_S 7
#define ICE_IEEE_PFC_WILLING_M BIT(ICE_IEEE_PFC_WILLING_S)

/* Defines for IEEE APP TLV */
#define ICE_IEEE_APP_SEL_S 0
#define ICE_IEEE_APP_SEL_M (0x7 << ICE_IEEE_APP_SEL_S)
#define ICE_IEEE_APP_PRIO_S 5
#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S)

/* TLV definitions for preparing MIB */
#define ICE_IEEE_TLV_ID_ETS_CFG 3
#define ICE_IEEE_TLV_ID_ETS_REC 4
#define ICE_IEEE_TLV_ID_PFC_CFG 5
#define ICE_IEEE_TLV_ID_APP_PRI 6
#define ICE_TLV_ID_END_OF_LLDPPDU 7
#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG

#define ICE_IEEE_ETS_TLV_LEN 25
#define ICE_IEEE_PFC_TLV_LEN 6
#define ICE_IEEE_APP_TLV_LEN 11

/* IEEE 802.1AB LLDP Organization specific TLV */
struct ice_lldp_org_tlv {
	__be16 typelen;
	__be32 ouisubtype;
	u8 tlvinfo[1];
} __packed;
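The shift/mask pairs above encode the standard LLDP TLV header layout: the 16-bit typelen word carries a 7-bit type in bits 15:9 and a 9-bit payload length in bits 8:0. A standalone sketch of the decode these masks imply (plain C, not driver code; the literals mirror ICE_LLDP_TLV_TYPE_M and ICE_LLDP_TLV_LEN_M):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* ORG TLV (type 127, per ICE_TLV_TYPE_ORG) with a 25-byte payload */
	uint16_t typelen = (127u << 9) | 25u;
	/* type lives in bits 15:9, length in bits 8:0 */
	uint16_t type = (typelen & (0x7F << 9)) >> 9;
	uint16_t len = typelen & 0x01FF;

	printf("type=%u len=%u\n", type, len);	/* prints type=127 len=25 */
	return 0;
}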

struct ice_cee_tlv_hdr {
	__be16 typelen;
	u8 operver;
	u8 maxver;
};

struct ice_cee_ctrl_tlv {
	struct ice_cee_tlv_hdr hdr;
	__be32 seqno;
	__be32 ackno;
};

struct ice_cee_feat_tlv {
	struct ice_cee_tlv_hdr hdr;
	u8 en_will_err;	/* Bits: |En|Will|Err|Reserved(5)| */
#define ICE_CEE_FEAT_TLV_ENA_M 0x80
#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
#define ICE_CEE_FEAT_TLV_ERR_M 0x20
	u8 subtype;
	u8 tlvinfo[1];
};

struct ice_cee_app_prio {
	__be16 protocol;
	u8 upper_oui_sel;	/* Bits: |Upper OUI(6)|Selector(2)| */
#define ICE_CEE_APP_SELECTOR_M 0x03
	__be16 lower_oui;
	u8 prio_map;
} __packed;

u8 ice_get_dcbx_status(struct ice_hw *hw);
enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_init_dcb(struct ice_hw *hw);
enum ice_status
ice_query_port_ets(struct ice_port_info *pi,
		   struct ice_aqc_port_ets_elem *buf, u16 buf_size,
		   struct ice_sq_cd *cmd_details);
#ifdef CONFIG_DCB
enum ice_status
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
		 struct ice_sq_cd *cd);
enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd);
enum ice_status
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
		       bool *dcbx_agent_status, struct ice_sq_cd *cd);
enum ice_status
ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
			   struct ice_sq_cd *cd);
#else /* CONFIG_DCB */
static inline enum ice_status
ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
		 bool __always_unused shutdown_lldp_agent,
		 struct ice_sq_cd __always_unused *cd)
{
	return 0;
}

static inline enum ice_status
ice_aq_start_lldp(struct ice_hw __always_unused *hw,
		  struct ice_sq_cd __always_unused *cd)
{
	return 0;
}

static inline enum ice_status
ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
		       bool __always_unused start_dcbx_agent,
		       bool *dcbx_agent_status,
		       struct ice_sq_cd __always_unused *cd)
{
	*dcbx_agent_status = false;

	return 0;
}

static inline enum ice_status
ice_aq_cfg_lldp_mib_change(struct ice_hw __always_unused *hw,
			   bool __always_unused ena_update,
			   struct ice_sq_cd __always_unused *cd)
{
	return 0;
}

#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_H_ */

drivers/net/ethernet/intel/ice/ice_dcb_lib.c (new file, 551 lines)
@@ -0,0 +1,551 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_dcb_lib.h"

/**
 * ice_dcb_get_ena_tc - return bitmap of enabled TCs
 * @dcbcfg: DCB config to evaluate for enabled TCs
 */
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
{
	u8 i, num_tc, ena_tc = 1;

	num_tc = ice_dcb_get_num_tc(dcbcfg);

	for (i = 0; i < num_tc; i++)
		ena_tc |= BIT(i);

	return ena_tc;
}

/**
 * ice_dcb_get_num_tc - Get the number of TCs from DCBX config
 * @dcbcfg: config to retrieve number of TCs from
 */
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
{
	bool tc_unused = false;
	u8 num_tc = 0;
	u8 ret = 0;
	int i;

	/* Scan the ETS Config Priority Table to find traffic classes
	 * enabled and create a bitmask of enabled TCs
	 */
	for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
		num_tc |= BIT(dcbcfg->etscfg.prio_table[i]);

	/* Scan bitmask for contiguous TCs starting with TC0 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (num_tc & BIT(i)) {
			if (!tc_unused) {
				ret++;
			} else {
				pr_err("Non-contiguous TCs - Disabling DCB\n");
				return 1;
			}
		} else {
			tc_unused = true;
		}
	}

	/* There is always at least 1 TC */
	if (!ret)
		ret = 1;

	return ret;
}
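A worked example makes the two scans above concrete: each of the eight user priorities names a TC, the referenced TCs form a bitmask, and only a run of set bits starting at TC0 counts; a gap disables DCB by reporting a single TC. A standalone rework under the assumption that both CEE_DCBX_MAX_PRIO and IEEE_8021QAZ_MAX_TCS are 8 (the helper name is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Same logic as ice_dcb_get_num_tc, outside the driver. */
static int count_contig_tcs(const uint8_t prio_table[8])
{
	uint8_t mask = 0;
	int i, ret = 0, gap = 0;

	/* bitmask of every TC referenced by a priority */
	for (i = 0; i < 8; i++)
		mask |= 1u << prio_table[i];

	/* only a contiguous run starting at TC0 is usable */
	for (i = 0; i < 8; i++) {
		if (mask & (1u << i)) {
			if (gap)
				return 1;	/* non-contiguous: fall back to one TC */
			ret++;
		} else {
			gap = 1;
		}
	}
	return ret ? ret : 1;	/* there is always at least 1 TC */
}

int main(void)
{
	uint8_t prios[8] = { 0, 0, 1, 1, 2, 2, 0, 0 };

	printf("%d\n", count_contig_tcs(prios));	/* mask 0b111 -> prints 3 */
	return 0;
}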

/**
 * ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC
 * @vsi: VSI owner of rings being updated
 */
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
{
	struct ice_ring *tx_ring, *rx_ring;
	u16 qoffset, qcount;
	int i, n;

	if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
		/* Reset the TC information */
		for (i = 0; i < vsi->num_txq; i++) {
			tx_ring = vsi->tx_rings[i];
			tx_ring->dcb_tc = 0;
		}
		for (i = 0; i < vsi->num_rxq; i++) {
			rx_ring = vsi->rx_rings[i];
			rx_ring->dcb_tc = 0;
		}
		return;
	}

	ice_for_each_traffic_class(n) {
		if (!(vsi->tc_cfg.ena_tc & BIT(n)))
			break;

		qoffset = vsi->tc_cfg.tc_info[n].qoffset;
		qcount = vsi->tc_cfg.tc_info[n].qcount_tx;
		for (i = qoffset; i < (qoffset + qcount); i++) {
			tx_ring = vsi->tx_rings[i];
			rx_ring = vsi->rx_rings[i];
			tx_ring->dcb_tc = n;
			rx_ring->dcb_tc = n;
		}
	}
}

/**
 * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
 * @pf: pointer to the PF struct
 *
 * Assumed caller has already disabled all VSIs before
 * calling this function. Reconfiguring DCB based on
 * local_dcbx_cfg.
 */
static void ice_pf_dcb_recfg(struct ice_pf *pf)
{
	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
	u8 tc_map = 0;
	int v, ret;

	/* Update each VSI */
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;

		if (pf->vsi[v]->type == ICE_VSI_PF)
			tc_map = ice_dcb_get_ena_tc(dcbcfg);
		else
			tc_map = ICE_DFLT_TRAFFIC_CLASS;

		ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
		if (ret)
			dev_err(&pf->pdev->dev,
				"Failed to config TC for VSI index: %d\n",
				pf->vsi[v]->idx);
		else
			ice_vsi_map_rings_to_vectors(pf->vsi[v]);
	}
}

/**
 * ice_pf_dcb_cfg - Apply new DCB configuration
 * @pf: pointer to the PF struct
 * @new_cfg: DCBX config to apply
 */
static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
{
	struct ice_dcbx_cfg *old_cfg, *curr_cfg;
	struct ice_aqc_port_ets_elem buf = { 0 };
	int ret = 0;

	curr_cfg = &pf->hw.port_info->local_dcbx_cfg;

	/* Enable DCB tagging only when more than one TC */
	if (ice_dcb_get_num_tc(new_cfg) > 1) {
		dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
	} else {
		dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
	}

	if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change in DCB config required\n");
		return ret;
	}

	/* Store old config in case FW config fails */
	old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);
	memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));

	/* avoid race conditions by holding the lock while disabling and
	 * re-enabling the VSI
	 */
	rtnl_lock();
	ice_pf_dis_all_vsi(pf, true);

	memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
	memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));

	/* Only send new config to HW if we are in SW LLDP mode. Otherwise,
	 * the new config came from the HW in the first place.
	 */
	if (pf->hw.port_info->is_sw_lldp) {
		ret = ice_set_dcb_cfg(pf->hw.port_info);
		if (ret) {
			dev_err(&pf->pdev->dev, "Set DCB Config failed\n");
			/* Restore previous settings to local config */
			memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
			goto out;
		}
	}

	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
		goto out;
	}

	ice_pf_dcb_recfg(pf);

out:
	ice_pf_ena_all_vsi(pf, true);
	rtnl_unlock();
	devm_kfree(&pf->pdev->dev, old_cfg);
	return ret;
}

/**
 * ice_dcb_rebuild - rebuild DCB post reset
 * @pf: physical function instance
 */
void ice_dcb_rebuild(struct ice_pf *pf)
{
	struct ice_aqc_port_ets_elem buf = { 0 };
	struct ice_dcbx_cfg *prev_cfg;
	enum ice_status ret;
	u8 willing;

	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
		goto dcb_error;
	}

	/* If DCB was not enabled previously, we are done */
	if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
		return;

	/* Save current willing state and force FW to unwilling */
	willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing;
	pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0;
	ret = ice_set_dcb_cfg(pf->hw.port_info);
	if (ret) {
		dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
		goto dcb_error;
	}

	/* Retrieve DCB config and ensure same as current in SW */
	prev_cfg = devm_kmemdup(&pf->pdev->dev,
				&pf->hw.port_info->local_dcbx_cfg,
				sizeof(*prev_cfg), GFP_KERNEL);
	if (!prev_cfg) {
		dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
		goto dcb_error;
	}

	ice_init_dcb(&pf->hw);
	if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg,
		   sizeof(*prev_cfg))) {
		/* difference in cfg detected - disable DCB till next MIB */
		dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
		devm_kfree(&pf->pdev->dev, prev_cfg);
		goto dcb_error;
	}

	/* fetched config congruent to previous configuration */
	devm_kfree(&pf->pdev->dev, prev_cfg);

	/* Configuration replayed - reset willing state to previous */
	pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing;
	ret = ice_set_dcb_cfg(pf->hw.port_info);
	if (ret) {
		dev_err(&pf->pdev->dev, "Fail restoring prev willing state\n");
		goto dcb_error;
	}
	dev_info(&pf->pdev->dev, "DCB restored after reset\n");
	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
		goto dcb_error;
	}

	return;

dcb_error:
	dev_err(&pf->pdev->dev, "Disabling DCB until new settings occur\n");
	prev_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*prev_cfg), GFP_KERNEL);
	prev_cfg->etscfg.willing = true;
	prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
	prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
	memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
	ice_pf_dcb_cfg(pf, prev_cfg);
	devm_kfree(&pf->pdev->dev, prev_cfg);
}

/**
 * ice_dcb_init_cfg - set the initial DCB config in SW
 * @pf: pf to apply config to
 */
static int ice_dcb_init_cfg(struct ice_pf *pf)
{
	struct ice_dcbx_cfg *newcfg;
	struct ice_port_info *pi;
	int ret = 0;

	pi = pf->hw.port_info;
	newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL);
	if (!newcfg)
		return -ENOMEM;

	memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg));
	memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));

	dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
	if (ice_pf_dcb_cfg(pf, newcfg))
		ret = -EINVAL;

	devm_kfree(&pf->pdev->dev, newcfg);

	return ret;
}

/**
 * ice_dcb_sw_default_config - Apply a default DCB config
 * @pf: pf to apply config to
 */
static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
{
	struct ice_aqc_port_ets_elem buf = { 0 };
	struct ice_dcbx_cfg *dcbcfg;
	struct ice_port_info *pi;
	struct ice_hw *hw;
	int ret;

	hw = &pf->hw;
	pi = hw->port_info;
	dcbcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*dcbcfg), GFP_KERNEL);

	memset(dcbcfg, 0, sizeof(*dcbcfg));
	memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));

	dcbcfg->etscfg.willing = 1;
	dcbcfg->etscfg.maxtcs = 8;
	dcbcfg->etscfg.tcbwtable[0] = 100;
	dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;

	memcpy(&dcbcfg->etsrec, &dcbcfg->etscfg,
	       sizeof(dcbcfg->etsrec));
	dcbcfg->etsrec.willing = 0;

	dcbcfg->pfc.willing = 1;
	dcbcfg->pfc.pfccap = IEEE_8021QAZ_MAX_TCS;

	dcbcfg->numapps = 1;
	dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE;
	dcbcfg->app[0].priority = 3;
	dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;

	ret = ice_pf_dcb_cfg(pf, dcbcfg);
	devm_kfree(&pf->pdev->dev, dcbcfg);
	if (ret)
		return ret;

	return ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
}

/**
 * ice_init_pf_dcb - initialize DCB for a PF
 * @pf: pf to initiialize DCB for
 */
int ice_init_pf_dcb(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	struct ice_port_info *port_info;
	struct ice_hw *hw = &pf->hw;
	int sw_default = 0;
	int err;

	port_info = hw->port_info;

	/* check if device is DCB capable */
	if (!hw->func_caps.common_cap.dcb) {
		dev_dbg(dev, "DCB not supported\n");
		return -EOPNOTSUPP;
	}

	/* Best effort to put DCBx and LLDP into a good state */
	port_info->dcbx_status = ice_get_dcbx_status(hw);
	if (port_info->dcbx_status != ICE_DCBX_STATUS_DONE &&
	    port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
		bool dcbx_status;

		/* Attempt to start LLDP engine. Ignore errors
		 * as this will error if it is already started
		 */
		ice_aq_start_lldp(hw, NULL);

		/* Attempt to start DCBX. Ignore errors as this
		 * will error if it is already started
		 */
		ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL);
	}

	err = ice_init_dcb(hw);
	if (err) {
		/* FW LLDP not in usable state, default to SW DCBx/LLDP */
		dev_info(&pf->pdev->dev, "FW LLDP not in usable state\n");
		hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
		hw->port_info->is_sw_lldp = true;
	}

	if (port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
		dev_info(&pf->pdev->dev, "DCBX disabled\n");

	/* LLDP disabled in FW */
	if (port_info->is_sw_lldp) {
		sw_default = 1;
		dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n");
	}

	if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
		sw_default = 1;
		dev_info(&pf->pdev->dev, "DCBX not started\n");
	}

	if (sw_default) {
		err = ice_dcb_sw_dflt_cfg(pf);
		if (err) {
			dev_err(&pf->pdev->dev,
				"Failed to set local DCB config %d\n", err);
			err = -EIO;
			goto dcb_init_err;
		}

		pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
		return 0;
	}

	/* DCBX in FW and LLDP enabled in FW */
	pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;

	set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);

	err = ice_dcb_init_cfg(pf);
	if (err)
		goto dcb_init_err;

	dev_info(&pf->pdev->dev, "DCBX offload supported\n");
	return err;

dcb_init_err:
	dev_err(dev, "DCB init failed\n");
	return err;
}

/**
 * ice_update_dcb_stats - Update DCB stats counters
 * @pf: PF whose stats needs to be updated
 */
void ice_update_dcb_stats(struct ice_pf *pf)
{
	struct ice_hw_port_stats *prev_ps, *cur_ps;
	struct ice_hw *hw = &pf->hw;
	u8 pf_id = hw->pf_id;
	int i;

	prev_ps = &pf->stats_prev;
	cur_ps = &pf->stats;

	for (i = 0; i < 8; i++) {
		ice_stat_update32(hw, GLPRT_PXOFFRXC(pf_id, i),
				  pf->stat_prev_loaded,
				  &prev_ps->priority_xoff_rx[i],
				  &cur_ps->priority_xoff_rx[i]);
		ice_stat_update32(hw, GLPRT_PXONRXC(pf_id, i),
				  pf->stat_prev_loaded,
				  &prev_ps->priority_xon_rx[i],
				  &cur_ps->priority_xon_rx[i]);
		ice_stat_update32(hw, GLPRT_PXONTXC(pf_id, i),
				  pf->stat_prev_loaded,
				  &prev_ps->priority_xon_tx[i],
				  &cur_ps->priority_xon_tx[i]);
		ice_stat_update32(hw, GLPRT_PXOFFTXC(pf_id, i),
				  pf->stat_prev_loaded,
				  &prev_ps->priority_xoff_tx[i],
				  &cur_ps->priority_xoff_tx[i]);
		ice_stat_update32(hw, GLPRT_RXON2OFFCNT(pf_id, i),
				  pf->stat_prev_loaded,
				  &prev_ps->priority_xon_2_xoff[i],
				  &cur_ps->priority_xon_2_xoff[i]);
	}
}

/**
 * ice_tx_prepare_vlan_flags_dcb - prepare VLAN tagging for DCB
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 */
int
ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
			      struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;

	if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
		return 0;

	/* Insert 802.1p priority into VLAN header */
	if ((first->tx_flags & (ICE_TX_FLAGS_HW_VLAN | ICE_TX_FLAGS_SW_VLAN)) ||
	    skb->priority != TC_PRIO_CONTROL) {
		first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
		/* Mask the lower 3 bits to set the 802.1p priority */
		first->tx_flags |= (skb->priority & 0x7) <<
				   ICE_TX_FLAGS_VLAN_PR_S;
		if (first->tx_flags & ICE_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(first->tx_flags >>
						 ICE_TX_FLAGS_VLAN_S);
		} else {
			first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
		}
	}

	return 0;
}
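For context on where those three priority bits end up: in the on-wire 802.1Q tag the PCP (802.1p priority) occupies bits 15:13 of the 16-bit TCI, next to the 12-bit VLAN ID. The driver keeps its own shifted copy of these fields inside tx_flags (the ICE_TX_FLAGS_VLAN_S/_PR_S values are not shown in this diff), so the sketch below uses the standard wire layout instead; make_tci is a hypothetical helper, not driver code:

#include <stdint.h>
#include <stdio.h>

/* On-wire VLAN TCI: PCP in bits 15:13, DEI in bit 12, VID in bits 11:0.
 * Only the low 3 bits of the priority are used, matching the
 * (skb->priority & 0x7) masking in the function above.
 */
static uint16_t make_tci(unsigned int priority, uint16_t vid)
{
	return (uint16_t)((priority & 0x7u) << 13) | (vid & 0x0FFFu);
}

int main(void)
{
	/* priority 5 on VLAN 100: (5 << 13) | 100 = 0xA064 */
	printf("0x%04X\n", make_tci(5, 100));
	return 0;
}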

/**
 * ice_dcb_process_lldp_set_mib_change - Process MIB change
 * @pf: ptr to ice_pf
 * @event: pointer to the admin queue receive event
 */
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
				    struct ice_rq_event_info *event)
{
	if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
		struct ice_dcbx_cfg *dcbcfg, *prev_cfg;
		int err;

		prev_cfg = &pf->hw.port_info->local_dcbx_cfg;
		dcbcfg = devm_kmemdup(&pf->pdev->dev, prev_cfg,
				      sizeof(*dcbcfg), GFP_KERNEL);
		if (!dcbcfg)
			return;

		err = ice_lldp_to_dcb_cfg(event->msg_buf, dcbcfg);
		if (!err)
			ice_pf_dcb_cfg(pf, dcbcfg);

		devm_kfree(&pf->pdev->dev, dcbcfg);

		/* Get updated DCBx data from firmware */
		err = ice_get_dcb_cfg(pf->hw.port_info);
		if (err)
			dev_err(&pf->pdev->dev,
				"Failed to get DCB config\n");
	} else {
		dev_dbg(&pf->pdev->dev,
			"MIB Change Event in HOST mode\n");
	}
}

drivers/net/ethernet/intel/ice/ice_dcb_lib.h (new file, 61 lines)
@@ -0,0 +1,61 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_DCB_LIB_H_
#define _ICE_DCB_LIB_H_

#include "ice.h"
#include "ice_lib.h"

#ifdef CONFIG_DCB
#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */

void ice_dcb_rebuild(struct ice_pf *pf);
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf);
void ice_update_dcb_stats(struct ice_pf *pf);
int
ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
			      struct ice_tx_buf *first);
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
				    struct ice_rq_event_info *event);
static inline void
ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
	tlan_ctx->cgd_num = ring->dcb_tc;
}
#else
#define ice_dcb_rebuild(pf) do {} while (0)

static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
{
	return ICE_DFLT_TRAFFIC_CLASS;
}

static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
{
	return 1;
}

static inline int ice_init_pf_dcb(struct ice_pf *pf)
{
	dev_dbg(&pf->pdev->dev, "DCB not supported\n");
	return -EOPNOTSUPP;
}

static inline int
ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
			      struct ice_tx_buf __always_unused *first)
{
	return 0;
}

#define ice_update_dcb_stats(pf) do {} while (0)
#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0)
#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */

@@ -4,6 +4,8 @@
/* ethtool support for ice */

#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

struct ice_stats {
	char stat_string[ETH_GSTRING_LEN];
@@ -33,8 +35,14 @@ static int ice_q_stats_len(struct net_device *netdev)
#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats)

#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
			      ice_q_stats_len(n))
#define ICE_PFC_STATS_LEN ( \
		(FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_rx) + \
		 FIELD_SIZEOF(struct ice_pf, stats.priority_xon_rx) + \
		 FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_tx) + \
		 FIELD_SIZEOF(struct ice_pf, stats.priority_xon_tx)) \
		 / sizeof(u64))
#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \
			      ICE_VSI_STATS_LEN + ice_q_stats_len(n))
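The ICE_PFC_STATS_LEN macro sums the byte sizes of the four per-priority counter arrays and divides by sizeof(u64) to get a stat count. Assuming eight user priorities of one u64 each (which matches the 8-iteration xon/xoff loops added to ice_get_strings and ice_get_ethtool_stats below), that works out to 32 extra stats. A standalone sketch of the same arithmetic over a mock struct, since FIELD_SIZEOF and struct ice_pf are kernel-internal:

#include <stdio.h>
#include <stdint.h>

/* Mock of the layout ICE_PFC_STATS_LEN measures: four arrays of one
 * u64 per user priority.
 */
struct mock_stats {
	uint64_t priority_xoff_rx[8];
	uint64_t priority_xon_rx[8];
	uint64_t priority_xoff_tx[8];
	uint64_t priority_xon_tx[8];
};

/* sizeof on a member through a null pointer is a constant expression,
 * the same trick FIELD_SIZEOF uses.
 */
#define MOCK_PFC_STATS_LEN \
	((sizeof(((struct mock_stats *)0)->priority_xoff_rx) + \
	  sizeof(((struct mock_stats *)0)->priority_xon_rx) + \
	  sizeof(((struct mock_stats *)0)->priority_xoff_tx) + \
	  sizeof(((struct mock_stats *)0)->priority_xon_tx)) / \
	 sizeof(uint64_t))

int main(void)
{
	printf("%zu\n", (size_t)MOCK_PFC_STATS_LEN);	/* prints 32 */
	return 0;
}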

static const struct ice_stats ice_gstrings_vsi_stats[] = {
	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
@@ -126,6 +134,7 @@ struct ice_priv_flag {

static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
	ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
	ICE_PRIV_FLAG("disable-fw-lldp", ICE_FLAG_DISABLE_FW_LLDP),
};

#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -309,6 +318,22 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			snprintf(p, ETH_GSTRING_LEN,
				 "port.tx-priority-%u-xon", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN,
				 "port.tx-priority-%u-xoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			snprintf(p, ETH_GSTRING_LEN,
				 "port.rx-priority-%u-xon", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN,
				 "port.rx-priority-%u-xoff", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
@@ -382,13 +407,19 @@ static u32 ice_get_priv_flags(struct net_device *netdev)
static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	DECLARE_BITMAP(change_flags, ICE_PF_FLAGS_NBITS);
	DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int ret = 0;
	u32 i;

	if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
		return -EINVAL;

	set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);

	bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS);
	for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
		const struct ice_priv_flag *priv_flag;

@@ -400,7 +431,79 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
			clear_bit(priv_flag->bitno, pf->flags);
	}

	return 0;
	bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);

	if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, change_flags)) {
		if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, pf->flags)) {
			enum ice_status status;

			status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
							    NULL);
			/* If unregistering for LLDP events fails, this is
			 * not an error state, as there shouldn't be any
			 * events to respond to.
			 */
			if (status)
				dev_info(&pf->pdev->dev,
					 "Failed to unreg for LLDP events\n");

			/* The AQ call to stop the FW LLDP agent will generate
			 * an error if the agent is already stopped.
			 */
			status = ice_aq_stop_lldp(&pf->hw, true, NULL);
			if (status)
				dev_warn(&pf->pdev->dev,
					 "Fail to stop LLDP agent\n");
			/* Use case for having the FW LLDP agent stopped
			 * will likely not need DCB, so failure to init is
			 * not a concern of ethtool
			 */
			status = ice_init_pf_dcb(pf);
			if (status)
				dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
		} else {
			enum ice_status status;
			bool dcbx_agent_status;

			/* AQ command to start FW LLDP agent will return an
			 * error if the agent is already started
			 */
			status = ice_aq_start_lldp(&pf->hw, NULL);
			if (status)
				dev_warn(&pf->pdev->dev,
					 "Fail to start LLDP Agent\n");

			/* AQ command to start FW DCBx agent will fail if
			 * the agent is already started
			 */
			status = ice_aq_start_stop_dcbx(&pf->hw, true,
							&dcbx_agent_status,
							NULL);
			if (status)
				dev_dbg(&pf->pdev->dev,
					"Failed to start FW DCBX\n");

			dev_info(&pf->pdev->dev, "FW DCBX agent is %s\n",
				 dcbx_agent_status ? "ACTIVE" : "DISABLED");

			/* Failure to configure MIB change or init DCB is not
			 * relevant to ethtool. Print notification that
			 * registration/init failed but do not return error
			 * state to ethtool
			 */
			status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
							    NULL);
			if (status)
				dev_dbg(&pf->pdev->dev,
					"Fail to reg for MIB change\n");

			status = ice_init_pf_dcb(pf);
			if (status)
				dev_dbg(&pf->pdev->dev, "Fail to init DCB\n");
		}
	}
	clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
	return ret;
}

static int ice_get_sset_count(struct net_device *netdev, int sset)
@@ -486,6 +589,16 @@ ice_get_ethtool_stats(struct net_device *netdev,
		data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
		data[i++] = pf->stats.priority_xon_tx[j];
		data[i++] = pf->stats.priority_xoff_tx[j];
	}

	for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
		data[i++] = pf->stats.priority_xon_rx[j];
		data[i++] = pf->stats.priority_xoff_rx[j];
	}
}

/**
@@ -811,7 +924,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,

	link_info = &vsi->port_info->phy.link_info;

	/* Initialize supported and advertised settings based on phy settings */
	/* Initialize supported and advertised settings based on PHY settings */
	switch (link_info->phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
@@ -921,6 +1034,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
						     25000baseCR_Full);
		break;
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseCR_Full);
		break;
@@ -1137,10 +1251,10 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
 */
static void
ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
			   struct net_device __always_unused *netdev)
			   struct net_device *netdev)
{
	/* link is down and the driver needs to fall back on
	 * supported phy types to figure out what info to display
	 * supported PHY types to figure out what info to display
	 */
	ice_phy_type_to_ethtool(netdev, ks);

@@ -1156,8 +1270,9 @@ ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
 *
 * Reports speed/duplex settings based on media_type
 */
static int ice_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *ks)
static int
ice_get_link_ksettings(struct net_device *netdev,
		       struct ethtool_link_ksettings *ks)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
@@ -1349,7 +1464,7 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
	} else {
		/* If autoneg is currently enabled */
		if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) {
			/* If autoneg is supported 10GBASE_T is the only phy
			/* If autoneg is supported 10GBASE_T is the only PHY
			 * that can disable it, so otherwise return error
			 */
			if (ethtool_link_ksettings_test_link_mode(ks,
@@ -1399,14 +1514,13 @@ ice_set_link_ksettings(struct net_device *netdev,
	if (!p)
		return -EOPNOTSUPP;

	/* Check if this is lan vsi */
	for (idx = 0 ; idx < pf->num_alloc_vsi ; idx++) {
	/* Check if this is LAN VSI */
	ice_for_each_vsi(pf, idx)
		if (pf->vsi[idx]->type == ICE_VSI_PF) {
			if (np->vsi != pf->vsi[idx])
				return -EOPNOTSUPP;
			break;
		}
	}

	if (p->phy.media_type != ICE_MEDIA_BASET &&
	    p->phy.media_type != ICE_MEDIA_FIBER &&
@@ -1464,7 +1578,7 @@ ice_set_link_ksettings(struct net_device *netdev,
	if (!abilities)
		return -ENOMEM;

	/* Get the current phy config */
	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities,
				     NULL);
	if (status) {
@@ -1559,15 +1673,16 @@ done:
}

/**
 * ice_get_rxnfc - command to get RX flow classification rules
 * ice_get_rxnfc - command to get Rx flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: buffer to rturn Rx flow classification rules
 *
 * Returns Success if the command is supported.
 */
static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			 u32 __always_unused *rule_locs)
static int
ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
	      u32 __always_unused *rule_locs)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
@@ -1821,18 +1936,21 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
	struct ice_port_info *pi = np->vsi->port_info;
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_vsi *vsi = np->vsi;
	struct ice_dcbx_cfg *dcbx_cfg;
	enum ice_status status;

	/* Initialize pause params */
	pause->rx_pause = 0;
	pause->tx_pause = 0;

	dcbx_cfg = &pi->local_dcbx_cfg;

	pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps),
			     GFP_KERNEL);
	if (!pcaps)
		return;

	/* Get current phy config */
	/* Get current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status)
@@ -1841,6 +1959,10 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
	pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
			  AUTONEG_ENABLE : AUTONEG_DISABLE);

	if (dcbx_cfg->pfc.pfcena)
		/* PFC enabled so report LFC as off */
		goto out;

	if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		pause->tx_pause = 1;
	if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
@@ -1861,6 +1983,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_pf *pf = np->vsi->back;
	struct ice_dcbx_cfg *dcbx_cfg;
	struct ice_vsi *vsi = np->vsi;
	struct ice_hw *hw = &pf->hw;
	struct ice_port_info *pi;
@@ -1871,6 +1994,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)

	pi = vsi->port_info;
	hw_link_info = &pi->phy.link_info;
	dcbx_cfg = &pi->local_dcbx_cfg;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	/* Changing the port's flow control is not supported if this isn't the
@@ -1893,6 +2017,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
	}

	if (dcbx_cfg->pfc.pfcena) {
		netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}
	if (pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
@@ -2021,11 +2149,12 @@ out:
 * @key: hash key
 * @hfunc: hash function
 *
 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
 * Returns -EINVAL if the table specifies an invalid queue ID, otherwise
 * returns 0 after programming the table.
 */
static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
static int
ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
	     const u8 hfunc)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
@@ -2087,7 +2216,7 @@ enum ice_container_type {
/**
 * ice_get_rc_coalesce - get ITR values for specific ring container
 * @ec: ethtool structure to fill with driver's coalesce settings
 * @c_type: container type, RX or TX
 * @c_type: container type, Rx or Tx
 * @rc: ring container that the ITR values will come from
 *
 * Query the device for ice_ring_container specific ITR values. This is
@@ -2100,12 +2229,18 @@ static int
ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
		    struct ice_ring_container *rc)
{
	struct ice_pf *pf = rc->ring->vsi->back;
	struct ice_pf *pf;

	if (!rc->ring)
		return -EINVAL;

	pf = rc->ring->vsi->back;

	switch (c_type) {
	case ICE_RX_CONTAINER:
		ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
		ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
		ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl;
		break;
	case ICE_TX_CONTAINER:
		ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
@@ -2119,50 +2254,61 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
	return 0;
}

/**
 * ice_get_q_coalesce - get a queue's ITR/INTRL (coalesce) settings
 * @vsi: VSI associated to the queue for getting ITR/INTRL (coalesce) settings
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 *
 * Return 0 on success, and negative under the following conditions:
 * 1. Getting Tx or Rx ITR/INTRL (coalesce) settings failed.
 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
 */
static int
ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
	if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
		if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
					&vsi->rx_rings[q_num]->q_vector->rx))
			return -EINVAL;
		if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
					&vsi->tx_rings[q_num]->q_vector->tx))
			return -EINVAL;
	} else if (q_num < vsi->num_rxq) {
		if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
					&vsi->rx_rings[q_num]->q_vector->rx))
			return -EINVAL;
	} else if (q_num < vsi->num_txq) {
		if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
					&vsi->tx_rings[q_num]->q_vector->tx))
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	return 0;
}
|
||||
|
||||
/**
|
||||
* __ice_get_coalesce - get ITR/INTRL values for the device
|
||||
* @netdev: pointer to the netdev associated with this query
|
||||
* @ec: ethtool structure to fill with driver's coalesce settings
|
||||
* @q_num: queue number to get the coalesce settings for
|
||||
*
|
||||
* If the caller passes in a negative q_num then we return coalesce settings
|
||||
* based on queue number 0, else use the actual q_num passed in.
|
||||
*/
|
||||
static int
|
||||
__ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
|
||||
int q_num)
|
||||
{
|
||||
struct ice_netdev_priv *np = netdev_priv(netdev);
|
||||
int tx = -EINVAL, rx = -EINVAL;
|
||||
struct ice_vsi *vsi = np->vsi;
|
||||
|
||||
if (q_num < 0) {
|
||||
rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
|
||||
&vsi->rx_rings[0]->q_vector->rx);
|
||||
tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
|
||||
&vsi->tx_rings[0]->q_vector->tx);
|
||||
if (q_num < 0)
|
||||
q_num = 0;
|
||||
|
||||
goto update_coalesced_frames;
|
||||
}
|
||||
|
||||
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
|
||||
rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
|
||||
&vsi->rx_rings[q_num]->q_vector->rx);
|
||||
tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
|
||||
&vsi->tx_rings[q_num]->q_vector->tx);
|
||||
} else if (q_num < vsi->num_rxq) {
|
||||
rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
|
||||
&vsi->rx_rings[q_num]->q_vector->rx);
|
||||
} else if (q_num < vsi->num_txq) {
|
||||
tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
|
||||
&vsi->tx_rings[q_num]->q_vector->tx);
|
||||
} else {
|
||||
/* q_num is invalid for both Rx and Tx queues */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
update_coalesced_frames:
|
||||
/* either q_num is invalid for both Rx and Tx queues or setting coalesce
|
||||
* failed completely
|
||||
*/
|
||||
if (tx && rx)
|
||||
if (ice_get_q_coalesce(vsi, ec, q_num))
|
||||
return -EINVAL;
|
||||
|
||||
if (q_num < vsi->num_txq)
|
||||
@@ -2180,15 +2326,16 @@ ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
|
||||
return __ice_get_coalesce(netdev, ec, -1);
|
||||
}
|
||||
|
||||
static int ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
|
||||
struct ethtool_coalesce *ec)
|
||||
static int
|
||||
ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
|
||||
struct ethtool_coalesce *ec)
|
||||
{
|
||||
return __ice_get_coalesce(netdev, ec, q_num);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_set_rc_coalesce - set ITR values for specific ring container
|
||||
* @c_type: container type, RX or TX
|
||||
* @c_type: container type, Rx or Tx
|
||||
* @ec: ethtool structure from user to update ITR settings
|
||||
* @rc: ring container that the ITR values will come from
|
||||
* @vsi: VSI associated to the ring container
|
||||
@@ -2213,6 +2360,23 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
|
||||
|
||||
switch (c_type) {
|
||||
case ICE_RX_CONTAINER:
|
||||
if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
|
||||
(ec->rx_coalesce_usecs_high &&
|
||||
ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
|
||||
netdev_info(vsi->netdev,
|
||||
"Invalid value, rx-usecs-high valid values are 0 (disabled), %d-%d\n",
|
||||
pf->hw.intrl_gran, ICE_MAX_INTRL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
|
||||
rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
|
||||
wr32(&pf->hw, GLINT_RATE(vsi->hw_base_vector +
|
||||
rc->ring->q_vector->v_idx),
|
||||
ice_intrl_usec_to_reg(ec->rx_coalesce_usecs_high,
|
||||
pf->hw.intrl_gran));
|
||||
}
|
||||
|
||||
if (ec->rx_coalesce_usecs != itr_setting &&
|
||||
ec->use_adaptive_rx_coalesce) {
|
||||
netdev_info(vsi->netdev,
|
||||
@@ -2235,6 +2399,12 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
|
||||
}
|
||||
break;
|
||||
case ICE_TX_CONTAINER:
|
||||
if (ec->tx_coalesce_usecs_high) {
|
||||
netdev_info(vsi->netdev,
|
||||
"setting tx-usecs-high is not supported\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ec->tx_coalesce_usecs != itr_setting &&
|
||||
ec->use_adaptive_tx_coalesce) {
|
||||
netdev_info(vsi->netdev,
|
||||
@@ -2264,54 +2434,77 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
|
||||
return 0;
|
||||
}
/**
 * ice_set_q_coalesce - set a queue's ITR/INTRL (coalesce) settings
 * @vsi: VSI associated to the queue that need updating
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 *
 * Return 0 on success, and negative under the following conditions:
 * 1. Setting Tx or Rx ITR/INTRL (coalesce) settings failed.
 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
 */
static int
ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;

if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_rxq) {
if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_txq) {
if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
} else {
return -EINVAL;
}

return 0;
}

/**
 * __ice_set_coalesce - set ITR/INTRL values for the device
 * @netdev: pointer to the netdev associated with this query
 * @ec: ethtool structure to fill with driver's coalesce settings
 * @q_num: queue number to get the coalesce settings for
 *
 * If the caller passes in a negative q_num then we set the coalesce settings
 * for all Tx/Rx queues, else use the actual q_num passed in.
 */
static int
__ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
int q_num)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
int rx = -EINVAL, tx = -EINVAL;
struct ice_vsi *vsi = np->vsi;

if (q_num < 0) {
int i;

ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];

if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
&q_vector->rx, vsi) ||
ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
&q_vector->tx, vsi))
if (ice_set_q_coalesce(vsi, ec, i))
return -EINVAL;
}

goto set_work_lmt;
}

if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi);
tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi);
} else if (q_num < vsi->num_rxq) {
rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi);
} else if (q_num < vsi->num_txq) {
tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi);
}

/* either q_num is invalid for both Rx and Tx queues or setting coalesce
 * failed completely
 */
if (rx && tx)
if (ice_set_q_coalesce(vsi, ec, q_num))
return -EINVAL;

set_work_lmt:

if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_lmt = max(ec->tx_max_coalesced_frames_irq,
ec->rx_max_coalesced_frames_irq);
@@ -2325,8 +2518,9 @@ ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
return __ice_set_coalesce(netdev, ec, -1);
}

static int ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
struct ethtool_coalesce *ec)
static int
ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
struct ethtool_coalesce *ec)
{
return __ice_set_coalesce(netdev, ec, q_num);
}
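These wrappers map the ethtool entry points onto __ice_get_coalesce/__ice_set_coalesce, with q_num == -1 meaning "all queues". A minimal sketch of how the four handlers would be wired into the driver's ethtool_ops (illustrative only; the real ice_ethtool_ops carries many more members):

static const struct ethtool_ops ice_ethtool_ops_sketch = {
	.get_coalesce		= ice_get_coalesce,
	.set_coalesce		= ice_set_coalesce,
	.get_per_queue_coalesce	= ice_get_per_q_coalesce,
	.set_per_queue_coalesce	= ice_set_per_q_coalesce,
};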

@@ -49,6 +49,9 @@
#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
@@ -106,6 +109,16 @@
#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
#define PFHMC_ERRORDATA 0x00520500
#define PFHMC_ERRORINFO 0x00520400
#define GLINT_CTL 0x0016CC54
#define GLINT_CTL_DIS_AUTOMASK_M BIT(0)
#define GLINT_CTL_ITR_GRAN_200_S 16
#define GLINT_CTL_ITR_GRAN_200_M ICE_M(0xF, 16)
#define GLINT_CTL_ITR_GRAN_100_S 20
#define GLINT_CTL_ITR_GRAN_100_M ICE_M(0xF, 20)
#define GLINT_CTL_ITR_GRAN_50_S 24
#define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24)
#define GLINT_CTL_ITR_GRAN_25_S 28
#define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28)
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
#define GLINT_DYN_CTL_INTENA_M BIT(0)
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
@@ -150,11 +163,15 @@
#define PFINT_OICR_ENA 0x0016C900
#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
#define QINT_RQCTL_MSIX_INDX_S 0
#define QINT_RQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define QINT_RQCTL_ITR_INDX_S 11
#define QINT_RQCTL_ITR_INDX_M ICE_M(0x3, 11)
#define QINT_RQCTL_CAUSE_ENA_M BIT(30)
#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4))
#define QINT_TQCTL_MSIX_INDX_S 0
#define QINT_TQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define QINT_TQCTL_ITR_INDX_S 11
#define QINT_TQCTL_ITR_INDX_M ICE_M(0x3, 11)
#define QINT_TQCTL_CAUSE_ENA_M BIT(30)
#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4))
#define VPINT_ALLOC_FIRST_S 0
@@ -168,6 +185,8 @@
#define VPINT_ALLOC_PCI_LAST_S 12
#define VPINT_ALLOC_PCI_LAST_M ICE_M(0x7FF, 12)
#define VPINT_ALLOC_PCI_VALID_M BIT(31)
#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4))
#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30)
#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
@@ -306,11 +325,16 @@
#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
#define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64))
#define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64))
#define GLPRT_PXONRXC(_i, _j) (0x00380300 + ((_i) * 8 + (_j) * 64))
#define GLPRT_PXONTXC(_i, _j) (0x00380D40 + ((_i) * 8 + (_j) * 64))
#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8))
#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8))
#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8))
#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8))
#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
#define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64))
#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
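The _S/_M pairs above follow the driver's shift/mask register convention. A small sketch of reading one such field, assuming ICE_M(m, s) expands to ((m) << (s)) as elsewhere in the ice driver (the helper name is made up):

static u32 prtdcb_dcbx_status_sketch(struct ice_hw *hw)
{
	u32 reg = rd32(hw, PRTDCB_GENS);

	return (reg & PRTDCB_GENS_DCBX_STATUS_M) >> PRTDCB_GENS_DCBX_STATUS_S;
}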

@@ -20,7 +20,7 @@ union ice_32byte_rx_desc {
} lo_dword;
union {
__le32 rss; /* RSS Hash */
__le32 fd_id; /* Flow Director filter id */
__le32 fd_id; /* Flow Director filter ID */
} hi_dword;
} qword0;
struct {
@@ -99,7 +99,7 @@ enum ice_rx_ptype_payload_layer {
ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};

/* RX Flex Descriptor
/* Rx Flex Descriptor
 * This descriptor is used instead of the legacy version descriptor when
 * ice_rlan_ctx.adv_desc is set
 */
@@ -113,7 +113,7 @@ union ice_32b_rx_flex_desc {
} read;
struct {
/* Qword 0 */
u8 rxdid; /* descriptor builder profile id */
u8 rxdid; /* descriptor builder profile ID */
u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
__le16 pkt_len; /* [15:14] are reserved */
@@ -149,7 +149,7 @@ union ice_32b_rx_flex_desc {

/* Rx Flex Descriptor NIC Profile
 * This descriptor corresponds to RxDID 2 which contains
 * metadata fields for RSS, flow id and timestamp info
 * metadata fields for RSS, flow ID and timestamp info
 */
struct ice_32b_rx_flex_desc_nic {
/* Qword 0 */
@@ -208,23 +208,23 @@ enum ice_flex_rx_mdid {
ICE_RX_MDID_HASH_HIGH,
};

/* Rx Flag64 packet flag bits */
enum ice_rx_flg64_bits {
ICE_RXFLG_PKT_DSI = 0,
ICE_RXFLG_EVLAN_x8100 = 15,
ICE_RXFLG_EVLAN_x9100,
ICE_RXFLG_VLAN_x8100,
ICE_RXFLG_TNL_MAC = 22,
ICE_RXFLG_TNL_VLAN,
ICE_RXFLG_PKT_FRG,
ICE_RXFLG_FIN = 32,
ICE_RXFLG_SYN,
ICE_RXFLG_RST,
ICE_RXFLG_TNL0 = 38,
ICE_RXFLG_TNL1,
ICE_RXFLG_TNL2,
ICE_RXFLG_UDP_GRE,
ICE_RXFLG_RSVD = 63
/* Rx/Tx Flag64 packet flag bits */
enum ice_flg64_bits {
ICE_FLG_PKT_DSI = 0,
ICE_FLG_EVLAN_x8100 = 15,
ICE_FLG_EVLAN_x9100,
ICE_FLG_VLAN_x8100,
ICE_FLG_TNL_MAC = 22,
ICE_FLG_TNL_VLAN,
ICE_FLG_PKT_FRG,
ICE_FLG_FIN = 32,
ICE_FLG_SYN,
ICE_FLG_RST,
ICE_FLG_TNL0 = 38,
ICE_FLG_TNL1,
ICE_FLG_TNL2,
ICE_FLG_UDP_GRE,
ICE_FLG_RSVD = 63
};

/* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */
@@ -322,7 +322,7 @@ enum ice_rlan_ctx_rx_hsplit_1 {
ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2,
};

/* TX Descriptor */
/* Tx Descriptor */
struct ice_tx_desc {
__le64 buf_addr; /* Address of descriptor's data buf */
__le64 cmd_type_offset_bsz;
@@ -342,12 +342,12 @@ enum ice_tx_desc_cmd_bits {
ICE_TX_DESC_CMD_EOP = 0x0001,
ICE_TX_DESC_CMD_RS = 0x0002,
ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020,
ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040,
ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060,
ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100,
ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200,
ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300,
};

#define ICE_TXD_QW1_OFFSET_S 16
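The command bits above get OR'd into the Tx descriptor's second quadword. A hypothetical sketch of packing that word: only ICE_TXD_QW1_OFFSET_S appears in this hunk, so ICE_TXD_QW1_CMD_S and ICE_TXD_QW1_TX_BUF_SZ_S are assumed companion shifts, and the descriptor-type bits are omitted:

static __le64 ice_build_qw1_sketch(u64 cmd, u64 offset, u64 size)
{
	/* e.g. cmd = ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS for the last
	 * descriptor of a frame that should report a completion
	 */
	return cpu_to_le64((cmd << ICE_TXD_QW1_CMD_S) |
			   (offset << ICE_TXD_QW1_OFFSET_S) |
			   (size << ICE_TXD_QW1_TX_BUF_SZ_S));
}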

File diff suppressed because it is too large
@@ -35,12 +35,16 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);

int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);

void ice_vsi_delete(struct ice_vsi *vsi);

int ice_vsi_clear(struct ice_vsi *vsi);

#ifdef CONFIG_DCB
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
#endif /* CONFIG_DCB */

struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id);
@@ -62,6 +66,10 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi);

void ice_vsi_put_qs(struct ice_vsi *vsi);

#ifdef CONFIG_DCB
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
#endif /* CONFIG_DCB */

void ice_vsi_dis_irq(struct ice_vsi *vsi);

void ice_vsi_free_irq(struct ice_vsi *vsi);
@@ -70,8 +78,7 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi);

void ice_vsi_free_tx_rings(struct ice_vsi *vsi);

int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);

int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);

u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
#endif /* !_ICE_LIB_H_ */

File diff suppressed because it is too large
@@ -5,7 +5,7 @@

/**
 * ice_aq_read_nvm
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be read (in bytes from the offset)
@@ -235,7 +235,7 @@ ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)

/**
 * ice_init_nvm - initializes NVM setting
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 *
 * This function reads and populates NVM settings such as Shadow RAM size,
 * max_timeout, and blank_nvm_mode
@@ -248,7 +248,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
u32 fla, gens_stat;
u8 sr_size;

/* The SR size is stored regardless of the nvm programming mode
/* The SR size is stored regardless of the NVM programming mode
 * as the blank mode may be used in the factory line.
 */
gens_stat = rd32(hw, GLNVM_GENS);

@@ -43,9 +43,9 @@ ice_sched_add_root_node(struct ice_port_info *pi,
/**
 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
 * @teid: node teid to search
 * @teid: node TEID to search
 *
 * This function searches for a node matching the teid in the scheduling tree
 * This function searches for a node matching the TEID in the scheduling tree
 * from the SW DB. The search is recursive and is restricted by the number of
 * layers it has searched through; stopping at the max supported layer.
 *
@@ -66,7 +66,7 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
return NULL;

/* Check if teid matches to any of the children nodes */
/* Check if TEID matches to any of the children nodes */
for (i = 0; i < start_node->num_children; i++)
if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
return start_node->children[i];
@@ -86,7 +86,7 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)

/**
 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @cmd_opc: cmd opcode
 * @elems_req: number of elements to request
 * @buf: pointer to buffer
@@ -118,7 +118,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,

/**
 * ice_aq_query_sched_elems - query scheduler elements
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
@@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
 *
 * Query scheduling elements (0x0404)
 */
static enum ice_status
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_get_elem *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
@@ -137,31 +137,6 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
elems_ret, cd);
}

/**
 * ice_sched_query_elem - query element information from hw
 * @hw: pointer to the hw struct
 * @node_teid: node teid to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
static enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf)
{
u16 buf_size, num_elem_ret = 0;
enum ice_status status;

buf_size = sizeof(*buf);
memset(buf, 0, buf_size);
buf->generic[0].node_teid = cpu_to_le32(node_teid);
status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
NULL);
if (status || num_elem_ret != 1)
ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
return status;
}
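ice_aq_query_sched_elems loses its static qualifier here (it is exported via ice_sched.h further down), while the ice_sched_query_elem wrapper is dropped from this file. A hedged sketch of what a caller of the now-exported AQ query looks like, mirroring the removed wrapper (helper name invented):

static enum ice_status query_one_elem_sketch(struct ice_hw *hw, u32 node_teid,
					     struct ice_aqc_get_elem *buf)
{
	u16 num_elem_ret = 0;

	memset(buf, 0, sizeof(*buf));
	buf->generic[0].node_teid = cpu_to_le32(node_teid);
	/* request exactly one element by TEID */
	return ice_aq_query_sched_elems(hw, 1, buf, sizeof(*buf),
					&num_elem_ret, NULL);
}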

/**
 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
 * @pi: port information structure
@@ -226,7 +201,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,

/**
 * ice_aq_delete_sched_elems - delete scheduler elements
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to delete
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
@@ -246,13 +221,13 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
}

/**
 * ice_sched_remove_elems - remove nodes from hw
 * @hw: pointer to the hw struct
 * ice_sched_remove_elems - remove nodes from HW
 * @hw: pointer to the HW struct
 * @parent: pointer to the parent node
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be deleted
 *
 * This function remove nodes from hw
 * This function remove nodes from HW
 */
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
@@ -276,7 +251,8 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
if (status || num_groups_removed != 1)
ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
hw->adminq.sq_last_status);

devm_kfree(ice_hw_to_dev(hw), buf);
return status;
@@ -284,7 +260,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,

/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @parent: pointer the base node of the subtree
 * @layer: layer number
 *
@@ -360,12 +336,8 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
enum ice_status status;

status = ice_sched_remove_elems(hw, node->parent, 1, &teid);
if (status)
ice_debug(hw, ICE_DBG_SCHED,
"remove element failed %d\n", status);
ice_sched_remove_elems(hw, node->parent, 1, &teid);
}
parent = node->parent;
/* root has no parent */
@@ -409,7 +381,7 @@ err_exit:

/**
 * ice_aq_get_dflt_topo - gets default scheduler topology
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
@@ -439,7 +411,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,

/**
 * ice_aq_add_sched_elems - adds scheduling element
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
@@ -460,7 +432,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,

/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
@@ -481,7 +453,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,

/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
@@ -502,7 +474,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,

/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
@@ -521,13 +493,13 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
}

/**
 * ice_sched_suspend_resume_elems - suspend or resume hw nodes
 * @hw: pointer to the hw struct
 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
 * @hw: pointer to the HW struct
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes hw nodes
 * This function suspends or resumes HW nodes
 */
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
@@ -561,10 +533,54 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
}

/**
 * ice_sched_clear_agg - clears the agg related information
 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */
static enum ice_status
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
struct ice_q_ctx *q_ctx;

vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
return ICE_ERR_PARAM;
/* allocate LAN queue contexts */
if (!vsi_ctx->lan_q_ctx[tc]) {
vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
new_numqs,
sizeof(*q_ctx),
GFP_KERNEL);
if (!vsi_ctx->lan_q_ctx[tc])
return ICE_ERR_NO_MEMORY;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
return 0;
}
/* num queues are increased, update the queue contexts */
if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
u16 prev_num = vsi_ctx->num_lan_q_entries[tc];

q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
return ICE_ERR_NO_MEMORY;
memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
vsi_ctx->lan_q_ctx[tc] = q_ctx;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
}
return 0;
}
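ice_alloc_lan_q_ctx grows the per-TC queue-context array but never shrinks it, copying the old entries so previously programmed queues keep their state across a resize. A hedged usage sketch with illustrative values (64 queues on TC 0; helper name invented):

static enum ice_status grow_q_ctx_sketch(struct ice_hw *hw, u16 vsi_handle)
{
	/* no-op if 64 or more contexts already exist for TC 0 */
	return ice_alloc_lan_q_ctx(hw, vsi_handle, 0, 64);
}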

/**
 * ice_sched_clear_agg - clears the aggregator related information
 * @hw: pointer to the hardware structure
 *
 * This function removes agg list and free up agg related memory
 * This function removes aggregator list and free up aggregator related memory
 * previously allocated.
 */
void ice_sched_clear_agg(struct ice_hw *hw)
@@ -622,7 +638,7 @@ void ice_sched_clear_port(struct ice_port_info *pi)

/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 *
 * Cleanup scheduling elements from SW DB for all the ports
 */
@@ -646,16 +662,16 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
}

/**
 * ice_sched_add_elems - add nodes to hw and SW DB
 * ice_sched_add_elems - add nodes to HW and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the teid of first node
 * @first_node_teid: if new nodes are added then return the TEID of first node
 *
 * This function add nodes to hw as well as to SW DB for a given layer
 * This function add nodes to HW as well as to SW DB for a given layer
 */
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
@@ -697,7 +713,8 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
&num_groups_added, NULL);
if (status || num_groups_added != 1) {
ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n");
ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
return ICE_ERR_CFG;
}
@@ -748,7 +765,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node teid
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function add nodes to a given layer.
@@ -800,7 +817,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,

*num_nodes_added += num_added;
}
/* Don't modify the first node teid memory if the first node was
/* Don't modify the first node TEID memory if the first node was
 * added already in the above call. Instead send some temp
 * memory for all other recursive calls.
 */
@@ -832,7 +849,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,

/**
 * ice_sched_get_qgrp_layer - get the current queue group layer number
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 *
 * This function returns the current queue group layer number
 */
@@ -844,7 +861,7 @@ static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)

/**
 * ice_sched_get_vsi_layer - get the current VSI layer number
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 *
 * This function returns the current VSI layer number
 */
@@ -855,7 +872,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
 * 7 4
 * 5 or less sw_entry_point_layer
 */
/* calculate the vsi layer based on number of layers. */
/* calculate the VSI layer based on number of layers. */
if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;

@@ -973,7 +990,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
goto err_init_port;
}

/* If the last node is a leaf node then the index of the Q group
/* If the last node is a leaf node then the index of the queue group
 * layer is two less than the number of elements.
 */
if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
@@ -1082,7 +1099,7 @@ sched_query_out:

/**
 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @base: pointer to the base node
 * @node: pointer to the node to search
 *
@@ -1114,13 +1131,13 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
}

/**
 * ice_sched_get_free_qparent - Get a free lan or rdma q group node
 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: branch number
 * @owner: lan or rdma
 * @owner: LAN or RDMA
 *
 * This function retrieves a free lan or rdma q group node
 * This function retrieves a free LAN or RDMA queue group node
 */
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
@@ -1138,11 +1155,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (!vsi_ctx)
return NULL;
vsi_node = vsi_ctx->sched.vsi_node[tc];
/* validate invalid VSI id */
/* validate invalid VSI ID */
if (!vsi_node)
goto lan_q_exit;

/* get the first q group node from VSI sub-tree */
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
while (qgrp_node) {
/* make sure the qgroup node is part of the VSI subtree */
@@ -1158,12 +1175,12 @@ lan_q_exit:
}

/**
 * ice_sched_get_vsi_node - Get a VSI node based on VSI id
 * @hw: pointer to the hw struct
 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
 * @hw: pointer to the HW struct
 * @tc_node: pointer to the TC node
 * @vsi_handle: software VSI handle
 *
 * This function retrieves a VSI node for a given VSI id from a given
 * This function retrieves a VSI node for a given VSI ID from a given
 * TC branch
 */
static struct ice_sched_node *
@@ -1188,7 +1205,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,

/**
 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @num_qs: number of queues
 * @num_nodes: num nodes array
 *
@@ -1204,7 +1221,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);

/* calculate num nodes from q group to VSI layer */
/* calculate num nodes from queue group to VSI layer */
for (i = qgl; i > vsil; i--) {
/* round to the next integer if there is a remainder */
num = DIV_ROUND_UP(num, hw->max_children[i]);
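The loop above walks from the queue-group layer down to the VSI layer, dividing by each layer's fan-out. A worked sketch with illustrative numbers (128 queues, 8 children per node): DIV_ROUND_UP(128, 8) = 16 queue-group nodes, which need DIV_ROUND_UP(16, 8) = 2 parents, which need 1 node at the next layer up:

static void calc_vsi_child_nodes_sketch(void)
{
	u16 num = 128;	/* queue count; made-up example value */
	int i;

	for (i = 0; i < 3; i++)
		num = DIV_ROUND_UP(num, 8);	/* 16, then 2, then 1 */
}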
@@ -1220,10 +1237,10 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (lan or rdma)
 * @owner: node owner (LAN or RDMA)
 *
 * This function adds the VSI child nodes to tree. It gets called for
 * lan and rdma separately.
 * LAN and RDMA separately.
 */
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
@@ -1270,45 +1287,9 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
return 0;
}

/**
 * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree
 * @pi: port information structure
 * @vsi_node: pointer to the VSI node
 * @num_nodes: pointer to the num nodes that needs to be removed per layer
 * @owner: node owner (lan or rdma)
 *
 * This function removes the VSI child nodes from the tree. It gets called for
 * lan and rdma separately.
 */
static void
ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
struct ice_sched_node *vsi_node, u16 *num_nodes,
u8 owner)
{
struct ice_sched_node *node, *next;
u8 i, qgl, vsil;
u16 num;

qgl = ice_sched_get_qgrp_layer(pi->hw);
vsil = ice_sched_get_vsi_layer(pi->hw);

for (i = qgl; i > vsil; i--) {
num = num_nodes[i];
node = ice_sched_get_first_node(pi->hw, vsi_node, i);
while (node && num) {
next = node->sibling;
if (node->owner == owner && !node->num_children) {
ice_free_sched_node(pi, node);
num--;
}
node = next;
}
}
}

/**
 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
@@ -1427,7 +1408,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
/* calculate number of supported nodes needed for this VSI */
ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);

/* add vsi supported nodes to tc subtree */
/* add VSI supported nodes to TC subtree */
return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
num_nodes);
}
@@ -1446,7 +1427,6 @@ static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u16 new_numqs, u8 owner)
{
u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
@@ -1454,7 +1434,6 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 prev_numqs;
u8 i;

tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
@@ -1468,41 +1447,30 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!vsi_ctx)
return ICE_ERR_PARAM;

if (owner == ICE_SCHED_NODE_OWNER_LAN)
prev_numqs = vsi_ctx->sched.max_lanq[tc];
else
return ICE_ERR_PARAM;

/* num queues are not changed */
if (prev_numqs == new_numqs)
prev_numqs = vsi_ctx->sched.max_lanq[tc];
/* num queues are not changed or less than the previous number */
if (new_numqs <= prev_numqs)
return status;
status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
if (status)
return status;

/* calculate number of nodes based on prev/new number of qs */
if (prev_numqs)
ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes);

if (new_numqs)
ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);

if (prev_numqs > new_numqs) {
for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i];

ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes,
owner);
} else {
for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
new_num_nodes[i] -= prev_num_nodes[i];

status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
new_num_nodes, owner);
if (status)
return status;
}

/* Keep the max number of queue configuration all the time. Update the
 * tree only if number of queues > previous number of queues. This may
 * leave some extra nodes in the tree if number of queues < previous
 * number but that wouldn't harm anything. Removing those extra nodes
 * may complicate the code if those nodes are part of SRL or
 * individually rate limited.
 */
status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
new_num_nodes, owner);
if (status)
return status;
vsi_ctx->sched.max_lanq[tc] = new_numqs;

return status;
return 0;
}

/**
@@ -1511,7 +1479,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @maxqs: max number of queues
 * @owner: lan or rdma
 * @owner: LAN or RDMA
 * @enable: TC enabled or disabled
 *
 * This function adds/updates VSI nodes based on the number of queues. If TC is
@@ -1527,6 +1495,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;

ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
return ICE_ERR_PARAM;
@@ -1535,7 +1504,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
return ICE_ERR_PARAM;
vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);

/* suspend the VSI if tc is not enabled */
/* suspend the VSI if TC is not enabled */
if (!enable) {
if (vsi_node && vsi_node->in_use) {
u32 teid = le32_to_cpu(vsi_node->info.node_teid);
@@ -1586,7 +1555,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
}

/**
 * ice_sched_rm_agg_vsi_entry - remove agg related VSI info entry
 * ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
@@ -1646,8 +1615,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
enum ice_status status = ICE_ERR_PARAM;
struct ice_vsi_ctx *vsi_ctx;
u8 i, j = 0;
u8 i;

ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
return status;
mutex_lock(&pi->sched_lock);
@@ -1655,8 +1625,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!vsi_ctx)
goto exit_sched_rm_vsi_cfg;

for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
ice_for_each_traffic_class(i) {
struct ice_sched_node *vsi_node, *tc_node;
u8 j = 0;

tc_node = ice_sched_get_tc_node(pi, i);
if (!tc_node)
@@ -1689,7 +1660,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
ice_free_sched_node(pi, vsi_node);
vsi_ctx->sched.vsi_node[i] = NULL;

/* clean up agg related vsi info if any */
/* clean up aggregator related VSI info if any */
ice_sched_rm_agg_vsi_info(pi, vsi_handle);
}
if (owner == ICE_SCHED_NODE_OWNER_LAN)

@@ -24,6 +24,10 @@ struct ice_sched_agg_info {
};

/* FW AQ command calls */
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_get_elem *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi);

@@ -12,6 +12,7 @@ enum ice_status {
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
ICE_ERR_NOT_SUPPORTED = -4,
ICE_ERR_BAD_PTR = -5,
ICE_ERR_INVAL_SIZE = -6,
ICE_ERR_DEVICE_NOT_SUPPORTED = -8,

@@ -19,7 +19,7 @@
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 * In case of VLAN filter first two bytes defines ether type (0x8100)
 * and remaining two bytes are placeholder for programming a given VLAN id
 * and remaining two bytes are placeholder for programming a given VLAN ID
 * In case of Ether type filter it is treated as header without VLAN tag
 * and byte 12 and 13 is used to program a given Ether type instead
 */
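The comment above describes the dummy Ethernet header template: bytes 12-13 carry the 0x8100 ether type for a VLAN filter, and the following two bytes are the placeholder the driver later fills in. A hypothetical sketch of patching a 12-bit VLAN ID into such a buffer (helper invented for illustration):

static void set_dummy_vlan_sketch(u8 *hdr, u16 vlan_id)
{
	hdr[12] = 0x81;			/* VLAN ether type 0x8100, high byte */
	hdr[13] = 0x00;			/* low byte */
	hdr[14] = (vlan_id >> 8) & 0xF;	/* upper 4 bits of 12-bit VLAN ID */
	hdr[15] = vlan_id & 0xFF;	/* lower 8 bits */
}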
@@ -51,7 +51,7 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
@@ -87,7 +87,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,

/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
@@ -163,7 +163,7 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,

/**
 * ice_aq_add_vsi
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
@@ -206,7 +206,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,

/**
 * ice_aq_free_vsi
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
@@ -242,7 +242,7 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,

/**
 * ice_aq_update_vsi
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
@@ -279,7 +279,7 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,

/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * check whether the VSI is valid or not
@@ -290,11 +290,11 @@ bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
}

/**
 * ice_get_hw_vsi_num - return the hw VSI number
 * @hw: pointer to the hw struct
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the hw VSI number
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
@@ -304,7 +304,7 @@ u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)

/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the VSI context entry for a given VSI handle
@@ -316,21 +316,42 @@ struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)

/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
struct ice_vsi_ctx *vsi)
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
hw->vsi_ctx[vsi_handle] = vsi;
}

/**
 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 */
static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_vsi_ctx *vsi;
u8 i;

vsi = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi)
return;
ice_for_each_traffic_class(i) {
if (vsi->lan_q_ctx[i]) {
devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
vsi->lan_q_ctx[i] = NULL;
}
}
}

/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry
@@ -341,6 +362,7 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)

vsi = ice_get_vsi_ctx(hw, vsi_handle);
if (vsi) {
ice_clear_vsi_q_ctx(hw, vsi_handle);
devm_kfree(ice_hw_to_dev(hw), vsi);
hw->vsi_ctx[vsi_handle] = NULL;
}
@@ -348,7 +370,7 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)

/**
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 */
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
@@ -360,7 +382,7 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw)

/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
@@ -383,7 +405,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
return status;
tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!tmp_vsi_ctx) {
/* Create a new vsi context */
/* Create a new VSI context */
tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*tmp_vsi_ctx), GFP_KERNEL);
if (!tmp_vsi_ctx) {
@@ -398,12 +420,12 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
}

return status;
return 0;
}

/**
 * ice_free_vsi- free VSI context from hardware and VSI handle list
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
@@ -428,7 +450,7 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,

/**
 * ice_update_vsi
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
@@ -447,8 +469,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,

/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the hw struct
 * @vsi_list_id: VSI list id returned or used for lookup
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
@@ -504,7 +526,7 @@ ice_aq_alloc_free_vsi_list_exit:

/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
@@ -643,21 +665,43 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
fi->fltr_act == ICE_FWD_TO_Q ||
fi->fltr_act == ICE_FWD_TO_QGRP)) {
fi->lb_en = true;
/* Do not set lan_en to TRUE if
/* Setting LB for prune actions will result in replicated
 * packets to the internal switch that will be dropped.
 */
if (fi->lkup_type != ICE_SW_LKUP_VLAN)
fi->lb_en = true;

/* Set lan_en to TRUE if
 * 1. The switch is a VEB AND
 * 2
 * 2.1 The lookup is MAC with unicast addr for MAC, OR
 * 2.2 The lookup is MAC_VLAN with unicast addr for MAC
 * 2.1 The lookup is a directional lookup like ethertype,
 * promiscuous, ethertype-MAC, promiscuous-VLAN
 * and default-port OR
 * 2.2 The lookup is VLAN, OR
 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
 *
 * In all other cases, the LAN enable has to be set to true.
 * OR
 *
 * The switch is a VEPA.
 *
 * In all other cases, the LAN enable has to be set to false.
 */
if (!(hw->evb_veb &&
((fi->lkup_type == ICE_SW_LKUP_MAC &&
is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
(fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
if (hw->evb_veb) {
if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
fi->lkup_type == ICE_SW_LKUP_PROMISC ||
fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
fi->lkup_type == ICE_SW_LKUP_DFLT ||
fi->lkup_type == ICE_SW_LKUP_VLAN ||
(fi->lkup_type == ICE_SW_LKUP_MAC &&
!is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
(fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
!is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
fi->lan_en = true;
} else {
fi->lan_en = true;
}
}
}
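Condensed, the rewritten rule reads: on a VEB, lan_en stays clear only for unicast MAC and MAC_VLAN lookups (traffic that can be kept internal to the switch); every directional, VLAN, multicast or broadcast lookup, and everything on a VEPA, also goes out to the wire. A hedged sketch of that predicate (not a driver function):

static bool lan_en_sketch(bool evb_veb, bool unicast_mac_lookup)
{
	/* unicast MAC/MAC_VLAN on a VEB is the only case kept internal */
	return !(evb_veb && unicast_mac_lookup);
}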

@@ -799,7 +843,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource id
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
@@ -811,8 +855,8 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
/* For software marker we need 3 large actions
 * 1. FWD action: FWD TO VSI or VSI LIST
 * 2. GENERIC VALUE action to hold the profile id
 * 3. GENERIC VALUE action to hold the software marker id
 * 2. GENERIC VALUE action to hold the profile ID
 * 3. GENERIC VALUE action to hold the software marker ID
 */
const u16 num_lg_acts = 3;
enum ice_status status;
@@ -875,13 +919,13 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);

/* Update the action to point to the large action id */
/* Update the action to point to the large action ID */
rx_tx->pdata.lkup_tx_rx.act =
cpu_to_le32(ICE_SINGLE_ACT_PTR |
((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
ICE_SINGLE_ACT_PTR_VAL_M));

/* Use the filter rule id of the previously created rule with single
/* Use the filter rule ID of the previously created rule with single
 * act. Once the update happens, hardware will treat this as large
 * action
 */
@@ -904,10 +948,10 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list id generated as part of allocate resource
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 *
 * Helper function to create a new entry of VSI list id to VSI mapping
 * using the given VSI list id
 * Helper function to create a new entry of VSI list ID to VSI mapping
 * using the given VSI list ID
 */
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
@@ -935,13 +979,13 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list id generated as part of allocate resource
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list id
 * using the given VSI list ID
 */
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
@@ -998,7 +1042,7 @@ exit:

/**
 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: stores the ID of the VSI list to be created
@@ -1092,7 +1136,7 @@ ice_create_pkt_fwd_rule_exit:
 * @f_info: filter information for switch rule
 *
 * Call AQ command to update a previously created switch rule with a
 * VSI list id
 * VSI list ID
 */
static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
@@ -1119,7 +1163,7 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)

/**
 * ice_update_sw_rule_bridge_mode
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 *
 * Updates unicast switch filter rules based on VEB/VEPA mode
 */
@@ -1174,7 +1218,7 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
 * Allocate a new VSI list and add two VSIs
 * to this list using switch rule command
 * Update the previously created switch rule with the
 * newly created VSI list id
 * newly created VSI list ID
 * if a VSI list was previously created
 * Add the new VSI to the previously created VSI list set
 * using the update switch rule command
@@ -1255,7 +1299,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
return 0;

/* Update the previously created VSI list set with
 * the new VSI id passed in
 * the new VSI ID passed in
 */
vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
opcode = ice_aqc_opc_update_sw_rules;
@@ -1263,7 +1307,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
vsi_list_id, false, opcode,
new_fltr->lkup_type);
/* update VSI list mapping info with new VSI id */
/* update VSI list mapping info with new VSI ID */
if (!status)
set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
}
@@ -1305,7 +1349,7 @@ ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which VSI lists needs to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list id found containing vsi_handle
 * @vsi_list_id: VSI list ID found containing vsi_handle
 *
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
@@ -1336,7 +1380,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe id) for which rule has to be added
 * @recp_id: lookup type (recipe ID) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
@@ -1381,7 +1425,7 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
/**
 * ice_remove_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_list_id: VSI list id generated as part of allocate resource
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @lkup_type: switch rule filter lookup type
 *
 * The VSI list should be emptied before this function is called to remove the
@@ -1506,7 +1550,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe id for which the rule needs to removed
 * @recp_id: recipe ID for which the rule needs to removed
 * @f_entry: rule entry containing filter information
 */
static enum ice_status
@@ -1556,7 +1600,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
if (status)
goto exit;
/* if vsi count goes to zero after updating the vsi list */
/* if VSI count goes to zero after updating the VSI list */
if (list_elem->vsi_count == 0)
remove_rule = true;
}
@@ -1634,7 +1678,7 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
return ICE_ERR_PARAM;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* update the src in case it is vsi num */
/* update the src in case it is VSI num */
if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
return ICE_ERR_PARAM;
m_list_itr->fltr_info.src = hw_vsi_id;
@@ -1710,7 +1754,7 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
((u8 *)r_iter + (elem_sent * s_rule_size));
}

/* Fill up rule id based on the value returned from FW */
/* Fill up rule ID based on the value returned from FW */
r_iter = s_rule;
list_for_each_entry(m_list_itr, m_list, list_entry) {
struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
@@ -1770,7 +1814,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
new_fltr = &f_entry->fltr_info;

/* VLAN id should only be 12 bits */
/* VLAN ID should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
return ICE_ERR_PARAM;

@@ -1828,7 +1872,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
}
}
} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
/* Update existing VSI list to add new VSI id only if it used
/* Update existing VSI list to add new VSI ID only if it used
 * by one VLAN rule.
 */
cur_fltr = &v_list_itr->fltr_info;
@@ -1838,7 +1882,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* If VLAN rule exists and VSI list being used by this rule is
 * referenced by more than 1 VLAN rule. Then create a new VSI
 * list appending previous VSI with new VSI and update existing
 * VLAN rule to point to new VSI list id
 * VLAN rule to point to new VSI list ID
 */
struct ice_fltr_info tmp_fltr;
u16 vsi_handle_arr[2];
@@ -1925,6 +1969,65 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
return 0;
}

/**
 * ice_add_eth_mac - Add ethertype and MAC based filter rule
|
||||
* @hw: pointer to the hardware structure
|
||||
* @em_list: list of ether type MAC filter, MAC is optional
|
||||
*/
|
||||
enum ice_status
|
||||
ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
|
||||
{
|
||||
struct ice_fltr_list_entry *em_list_itr;
|
||||
|
||||
if (!em_list || !hw)
|
||||
return ICE_ERR_PARAM;
|
||||
|
||||
list_for_each_entry(em_list_itr, em_list, list_entry) {
|
||||
enum ice_sw_lkup_type l_type =
|
||||
em_list_itr->fltr_info.lkup_type;
|
||||
|
||||
if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
|
||||
l_type != ICE_SW_LKUP_ETHERTYPE)
|
||||
return ICE_ERR_PARAM;
|
||||
|
||||
em_list_itr->fltr_info.flag = ICE_FLTR_TX;
|
||||
em_list_itr->status = ice_add_rule_internal(hw, l_type,
|
||||
em_list_itr);
|
||||
if (em_list_itr->status)
|
||||
return em_list_itr->status;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
|
||||
* @hw: pointer to the hardware structure
|
||||
* @em_list: list of ethertype or ethertype MAC entries
|
||||
*/
|
||||
enum ice_status
|
||||
ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
|
||||
{
|
||||
struct ice_fltr_list_entry *em_list_itr, *tmp;
|
||||
|
||||
if (!em_list || !hw)
|
||||
return ICE_ERR_PARAM;
|
||||
|
||||
list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
|
||||
enum ice_sw_lkup_type l_type =
|
||||
em_list_itr->fltr_info.lkup_type;
|
||||
|
||||
if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
|
||||
l_type != ICE_SW_LKUP_ETHERTYPE)
|
||||
return ICE_ERR_PARAM;
|
||||
|
||||
em_list_itr->status = ice_remove_rule_internal(hw, l_type,
|
||||
em_list_itr);
|
||||
if (em_list_itr->status)
|
||||
return em_list_itr->status;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_rem_sw_rule_info
|
||||
* @hw: pointer to the hardware structure
|
||||
@@ -2170,7 +2273,7 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
|
||||
struct ice_fltr_mgmt_list_entry *fm_entry;
|
||||
enum ice_status status = 0;
|
||||
|
||||
/* check to make sure VSI id is valid and within boundary */
|
||||
/* check to make sure VSI ID is valid and within boundary */
|
||||
if (!ice_is_vsi_valid(hw, vsi_handle))
|
||||
return ICE_ERR_PARAM;
|
||||
|
||||
@@ -2189,6 +2292,291 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_determine_promisc_mask
|
||||
* @fi: filter info to parse
|
||||
*
|
||||
* Helper function to determine which ICE_PROMISC_ mask corresponds
|
||||
* to given filter into.
|
||||
*/
|
||||
static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
|
||||
{
|
||||
u16 vid = fi->l_data.mac_vlan.vlan_id;
|
||||
u8 *macaddr = fi->l_data.mac.mac_addr;
|
||||
bool is_tx_fltr = false;
|
||||
u8 promisc_mask = 0;
|
||||
|
||||
if (fi->flag == ICE_FLTR_TX)
|
||||
is_tx_fltr = true;
|
||||
|
||||
if (is_broadcast_ether_addr(macaddr))
|
||||
promisc_mask |= is_tx_fltr ?
|
||||
ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
|
||||
else if (is_multicast_ether_addr(macaddr))
|
||||
promisc_mask |= is_tx_fltr ?
|
||||
ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
|
||||
else if (is_unicast_ether_addr(macaddr))
|
||||
promisc_mask |= is_tx_fltr ?
|
||||
ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
|
||||
if (vid)
|
||||
promisc_mask |= is_tx_fltr ?
|
||||
ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
|
||||
|
||||
return promisc_mask;
|
||||
}
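
/* Editor's sketch (not part of this patch): for a Tx broadcast filter
 * carrying VLAN 10, the helper above returns
 * ICE_PROMISC_BCAST_TX | ICE_PROMISC_VLAN_TX (0x20 | 0x80 == 0xa0), per
 * the ice_promisc_flags values added in ice_switch.h by this series. The
 * hypothetical helper below restates the coverage test the clear path
 * applies to each rule ("fltr_promisc_mask & ~promisc_mask"): a filter is
 * covered only when it sets no bit outside the requested mask.
 */
static inline bool ice_example_promisc_covered(u8 fltr_mask, u8 req_mask)
{
	/* true when every bit of fltr_mask is also set in req_mask */
	return !(fltr_mask & ~req_mask);
}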

/**
 * ice_remove_promisc - Remove promisc based filter rules
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @v_list: list of promisc entries
 */
static enum ice_status
ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
		   struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		v_list_itr->status =
			ice_remove_rule_internal(hw, recp_id, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 */
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (vid)
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;

		fltr_promisc_mask |=
			ice_determine_promisc_mask(&itr->fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							&itr->fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}

/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 */
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = 0;
	bool is_tx_fltr;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1; /* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
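
/* Editor's sketch (not part of this patch): a minimal caller using the two
 * exported entry points above, with the signatures declared in ice_switch.h
 * later in this diff. ice_set_vsi_promisc() installs one switch rule per
 * direction/type bit it drains from the mask; vid == 0 selects the non-VLAN
 * promiscuous recipe. The wrapper name is hypothetical.
 */
static enum ice_status
ice_example_toggle_rx_promisc(struct ice_hw *hw, u16 vsi_handle, bool ena)
{
	u8 mask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX;

	return ena ? ice_set_vsi_promisc(hw, vsi_handle, mask, 0) :
		     ice_clear_vsi_promisc(hw, vsi_handle, mask, 0);
}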

/**
 * ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 */
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock; /* Lock to protect filter rule list */
	enum ice_status status;
	u16 vlan_id;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	mutex_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	mutex_unlock(vlan_lock);
	if (status)
		goto free_fltr_list;

	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		if (status)
			break;
	}

free_fltr_list:
	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
	}
	return status;
}

/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
@@ -2224,12 +2612,14 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_PROMISC_VLAN:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
@@ -2263,7 +2653,7 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe id for which rules need to be replayed
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
@@ -2287,7 +2677,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		f_entry.fltr_info = itr->fltr_info;
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is vsi num */
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
@@ -2302,7 +2692,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is vsi num */
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
@@ -2342,7 +2732,7 @@ enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)

/**
 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
 * @hw: pointer to the hw struct
 * @hw: pointer to the HW struct
 *
 * Deletes the filter replay rules.
 */

@@ -9,6 +9,13 @@
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_DFLT_VSI_INVAL 0xff
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
#define ICE_INVAL_Q_HANDLE 0xFFFF

/* VSI queue context structure */
struct ice_q_ctx {
	u16 q_handle;
};

/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
@@ -20,6 +27,8 @@ struct ice_vsi_ctx {
	struct ice_sched_vsi_info sched;
	u8 alloc_from_pool;
	u8 vf_num;
	u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
	struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
};

enum ice_sw_fwd_act_type {
@@ -44,7 +53,7 @@ enum ice_sw_lkup_type {
	ICE_SW_LKUP_LAST
};

/* type of filter src id */
/* type of filter src ID */
enum ice_src_id {
	ICE_SRC_ID_UNKNOWN = 0,
	ICE_SRC_ID_VSI,
@@ -95,8 +104,8 @@ struct ice_fltr_info {

	/* Depending on filter action */
	union {
		/* queue id in case of ICE_FWD_TO_Q and starting
		 * queue id in case of ICE_FWD_TO_QGRP.
		/* queue ID in case of ICE_FWD_TO_Q and starting
		 * queue ID in case of ICE_FWD_TO_QGRP.
		 */
		u16 q_id:11;
		u16 hw_vsi_id:10;
@@ -143,7 +152,7 @@ struct ice_sw_recipe {
	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
};

/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */
struct ice_vsi_list_map_info {
	struct list_head list_entry;
	DECLARE_BITMAP(vsi_map, ICE_MAX_VSI);
@@ -165,7 +174,7 @@ struct ice_fltr_list_entry {
 * used for VLAN membership.
 */
struct ice_fltr_mgmt_list_entry {
	/* back pointer to VSI list id to VSI list mapping */
	/* back pointer to VSI list ID to VSI list mapping */
	struct ice_vsi_list_map_info *vsi_list_info;
	u16 vsi_count;
#define ICE_INVAL_LG_ACT_INDEX 0xffff
@@ -178,6 +187,17 @@ struct ice_fltr_mgmt_list_entry {
	u8 counter_index;
};

enum ice_promisc_flags {
	ICE_PROMISC_UCAST_RX = 0x1,
	ICE_PROMISC_UCAST_TX = 0x2,
	ICE_PROMISC_MCAST_RX = 0x4,
	ICE_PROMISC_MCAST_TX = 0x8,
	ICE_PROMISC_BCAST_RX = 0x10,
	ICE_PROMISC_BCAST_TX = 0x20,
	ICE_PROMISC_VLAN_RX = 0x40,
	ICE_PROMISC_VLAN_TX = 0x80,
};

/* VSI related commands */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
@@ -198,11 +218,27 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status
ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
enum ice_status
ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);

/* Promisc/defport setup for VSIs */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		    u16 vid);
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid);
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc);

enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);

@@ -6,6 +6,7 @@
#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"
#include "ice_dcb_lib.h"

#define ICE_RX_HDR_SIZE 256

@@ -100,8 +101,8 @@ void ice_free_tx_ring(struct ice_ring *tx_ring)
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
			     int napi_budget)
static bool
ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = vsi->work_lmt;
@@ -236,9 +237,9 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest 4K */
	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      4096);
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
@@ -282,8 +283,17 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
		if (!rx_buf->page)
			continue;

		dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_pages(rx_buf->page, 0);
		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      ICE_RXBUF_2048, DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
@@ -339,9 +349,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
@@ -389,8 +399,8 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
				  struct ice_rx_buf *bi)
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;
@@ -409,7 +419,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
@@ -423,6 +434,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}
@@ -444,7 +457,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the RX descriptor and buffer based on next_to_use */
	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

@@ -452,6 +465,12 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
		if (!ice_alloc_mapped_page(rx_ring, bi))
			goto no_bufs;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 ICE_RXBUF_2048,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
@@ -497,61 +516,43 @@ static bool ice_page_is_reserved(struct page *page)
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buf: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buf to place the data into
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by the @size bytes
 */
static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
			    union ice_32b_rx_flex_desc *rx_desc,
			    struct sk_buff *skb)
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ICE_RXBUF_2048;
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
	unsigned int truesize;
#endif /* PAGE_SIZE < 8192) */

	struct page *page;
	unsigned int size;

	size = le16_to_cpu(rx_desc->wb.pkt_len) &
		ICE_RX_FLX_DESC_PKT_LEN_M;

	page = rx_buf->page;
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
#if (PAGE_SIZE >= 8192)
	truesize = ALIGN(size, L1_CACHE_BYTES);
#endif /* PAGE_SIZE >= 8192) */

	/* will the data fit in the skb we allocated? if so, just
	 * copy it as it is pretty small anyway
	 */
	if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buf->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!ice_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_pages(page, 0);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buf->page_offset, size, truesize);
	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
#endif
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
@@ -559,27 +560,52 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;

	/* flip page offset to other buffer */
	rx_buf->page_offset ^= truesize;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += truesize;

	if (rx_buf->page_offset > last_offset)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	get_page(rx_buf->page);
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
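
/* Editor's note (illustrative, not part of this patch): the pagecnt_bias
 * scheme introduced above replaces a get_page()/put_page() pair per buffer
 * with local arithmetic. At allocation the page reference count is
 * bulk-charged via page_ref_add(page, USHRT_MAX - 1) and pagecnt_bias is
 * set to USHRT_MAX, so page_count(page) - pagecnt_bias == 0. Handing one
 * buffer to the stack costs a single bias decrement, making the difference
 * 1, which the reuse test above still accepts; a second outstanding user
 * pushes the difference over 1 and the page is unmapped and freed instead
 * of recycled. Only when the prepaid pool is nearly drained
 * (pagecnt_bias == 1) does the driver pay another atomic page_ref_add() to
 * restock.
 */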

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
		unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size);
#else
	unsigned int truesize = ICE_RXBUF_2048;
#endif

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
@@ -587,8 +613,8 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
 *
 * Synchronizes page for reuse by the adapter
 */
static void ice_reuse_rx_page(struct ice_ring *rx_ring,
			      struct ice_rx_buf *old_buf)
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;
@@ -599,121 +625,132 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buf = *old_buf;
	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_fetch_rx_buf - Allocate skb and populate it
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_desc: descriptor containing info written by hardware
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 *
 * This function allocates an skb on the fly, and populates it with the page
 * data from the current receive descriptor, taking care to set up the skb
 * correctly, as well as handling calling the page recycle function if
 * necessary.
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
					union ice_32b_rx_flex_desc *rx_desc)
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size)
{
	struct ice_rx_buf *rx_buf;
	struct sk_buff *skb;
	struct page *page;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	page = rx_buf->page;
	prefetchw(page);
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	skb = rx_buf->skb;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	if (likely(!skb)) {
		u8 *page_addr = page_address(page) + rx_buf->page_offset;
	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
	return rx_buf;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @size: the length of the packet
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  unsigned int size)
{
	void *va = page_address(rx_buf->page) + rx_buf->page_offset;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch((void *)(page_addr + L1_CACHE_BYTES));
	prefetch((u8 *)va + L1_CACHE_BYTES);
#endif /* L1_CACHE_BYTES */

		/* allocate a skb to store the frags */
		skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
				       ICE_RX_HDR_SIZE,
				       GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_buf_failed++;
			return NULL;
		}
	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ICE_RXBUF_2048;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		prefetchw(skb->data);

		skb_record_rx_queue(skb, rx_ring->q_index);
	} else {
		/* we are reusing so sync this buffer for CPU use */
		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
					      rx_buf->page_offset,
					      ICE_RXBUF_2048,
					      DMA_FROM_DEVICE);

		rx_buf->skb = NULL;
		rx_buf->pagecnt_bias++;
	}

	/* pull page into skb */
	if (ice_add_rx_frag(rx_buf, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;

	return skb;
}

/**
 * ice_pull_tail - ice specific version of skb_pull_tail
 * @skb: pointer to current skb being adjusted
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function is an ice specific version of __pskb_pull_tail. The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 * This function will clean up the contents of the rx_buf. It will
 * either recycle the buffer or unmap it and free the associated resources.
 */
static void ice_pull_tail(struct sk_buff *skb)
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int pull_len;
	unsigned char *va;
	/* hand second half of page back to the ring */
	if (ice_can_reuse_rx_page(rx_buf)) {
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lomem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}

/**
@@ -730,10 +767,6 @@ static void ice_pull_tail(struct sk_buff *skb)
 */
static bool ice_cleanup_headers(struct sk_buff *skb)
{
	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		ice_pull_tail(skb);

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;
@@ -751,8 +784,8 @@ static bool ice_cleanup_headers(struct sk_buff *skb)
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
			     const u16 stat_err_bits)
static bool
ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
{
	return !!(rx_desc->wb.status_error0 &
		  cpu_to_le16(stat_err_bits));
@@ -769,9 +802,9 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool ice_is_non_eop(struct ice_ring *rx_ring,
			   union ice_32b_rx_flex_desc *rx_desc,
			   struct sk_buff *skb)
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

@@ -838,8 +871,9 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
 *
 * skb->protocol must be set before this function is called
 */
static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
			union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
static void
ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
@@ -909,9 +943,10 @@ checksum_fail:
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
static void ice_process_skb_fields(struct ice_ring *rx_ring,
				   union ice_32b_rx_flex_desc *rx_desc,
				   struct sk_buff *skb, u8 ptype)
static void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

@@ -925,18 +960,17 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via. skb) up the stack using
 * gro receive functions (with/without vlan tag)
 * gro receive functions (with/without VLAN tag)
 */
static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
			    u16 vlan_tag)
static void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK)) {
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	}
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

@@ -958,10 +992,12 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	bool failure = false;

	/* start the loop to process RX packets bounded by 'budget' */
	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;
@@ -973,7 +1009,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
			cleaned_count = 0;
		}

		/* get the RX desc from RX ring based on 'next_to_clean' */
		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
@@ -991,11 +1027,24 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
		 */
		dma_rmb();

		/* allocate (if needed) and populate skb */
		skb = ice_fetch_rx_buf(rx_ring, rx_desc);
		if (!skb)
			break;
		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
		/* allocate (if needed) and populate skb */
		if (skb)
			ice_add_rx_frag(rx_buf, skb, size);
		else
			skb = ice_construct_skb(rx_ring, rx_buf, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf);
		cleaned_count++;

		/* skip if it is NOP desc */
@@ -1048,18 +1097,248 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
	return failure ? budget : (int)total_rx_pkts;
}
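
/* Editor's note (illustrative, not part of this patch): per descriptor, the
 * reworked Rx clean loop above now decomposes into three small helpers:
 *
 *	size = le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M;
 *	rx_buf = ice_get_rx_buf(rx_ring, &skb, size);	// DMA sync, bias--
 *	if (skb)		// continuation of a multi-buffer frame
 *		ice_add_rx_frag(rx_buf, skb, size);
 *	else			// first buffer: copy headers, frag the rest
 *		skb = ice_construct_skb(rx_ring, rx_buf, size);
 *	ice_put_rx_buf(rx_ring, rx_buf);	// recycle page or unmap+free
 *
 * which replaces the monolithic ice_fetch_rx_buf()/ice_pull_tail() pair and
 * avoids the extra header copy on the recycle path.
 */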

/**
 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
 * @port_info: port_info structure containing the current link speed
 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
 * @itr: itr value to update
 *
 * Calculate how big of an increment should be applied to the ITR value passed
 * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
 * link speed.
 *
 * The following is a calculation derived from:
 * wmem_default / (size + overhead) = desired_pkts_per_int
 * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
 *
 * Assuming wmem_default is 212992 and overhead is 640 bytes per
 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
 * formula down to:
 *
 *	 wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
 * ITR = -------------------------------------------- * -------------
 *			     rate			pkt_size + 640
 */
static unsigned int
ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
				 unsigned int avg_pkt_size,
				 unsigned int itr)
{
	switch (port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		/* fall through */
	default:
		itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	}

	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
		itr &= ICE_ITR_ADAPTIVE_LATENCY;
		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
	}

	return itr;
}
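
/* Editor's note, a worked example of the formula above (not part of this
 * patch): the per-speed multipliers are wmem_default * bits_per_byte *
 * usecs_per_sec / rate, e.g. 212992 * 8 * 10^6 / 10^10 ~= 170 for 10GbE
 * (and ~17 at 100GbE). For an average packet of 1500 bytes on a 10GbE
 * link the increment works out to:
 *
 *	itr += DIV_ROUND_UP(170 * (1500 + 24), 1500 + 640)
 *	     = DIV_ROUND_UP(259080, 2140)
 *	     = 122 usecs
 */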

/**
 * ice_update_itr - update the adaptive ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 */
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
	unsigned long next_update = jiffies;
	unsigned int packets, bytes, itr;
	bool container_is_rx;

	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
		return;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	container_is_rx = (&q_vector->rx == rc);
	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = container_is_rx ?
		ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
		ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	packets = rc->total_pkts;
	bytes = rc->total_bytes;

	if (container_is_rx) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
			itr = ICE_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size_and_speed;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
		    ICE_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
		if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
			itr &= ICE_ITR_ADAPTIVE_LATENCY;
			itr += ICE_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= ICE_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr >>= 1;
		itr &= ICE_ITR_MASK;
		if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
			itr = ICE_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = ICE_ITR_ADAPTIVE_BULK;

adjust_by_size_and_speed:

	/* based on checks above packets cannot be 0 so division is safe */
	itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
					       bytes / packets, itr);

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_pkts = 0;
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @reg_itr: interrupt throttling value adjusted based on ITR granularity
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The itr value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(reg_itr << GLINT_DYN_CTL_INTERVAL_S);
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
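
/* Editor's note, illustrative only; the shift values here are an assumption
 * about how this driver defines GLINT_DYN_CTL_INTERVAL_S (taken as 5) and
 * ICE_ITR_GRAN_S (taken as 1, i.e. 2 usec hardware granularity). Writing an
 * ITR of 50 usecs, "itr &= ICE_ITR_MASK" keeps 50, and
 *
 *	50 << (5 - 1) == (50 >> 1) << 5
 *
 * places interval 25 (units of 2 usecs) into the INTERVAL field, so the
 * single shift performs the divide-by-granularity and the field placement
 * in one step.
 */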
|
||||
|
||||
+/* The act of updating the ITR will cause it to immediately trigger. In order
+ * to prevent this from throwing off adaptive update statistics we defer the
+ * update so that it can only happen so often. So after either Tx or Rx are
+ * updated we make the adaptive scheme wait until either the ITR completely
+ * expires via the next_update expiration or we have been through at least
+ * 3 interrupts.
+ */
+#define ITR_COUNTDOWN_START 3
+
 /**
  * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
  * @vsi: the VSI associated with the q_vector
@@ -1068,10 +1347,14 @@ static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
 static void
 ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
 {
-	struct ice_hw *hw = &vsi->back->hw;
-	struct ice_ring_container *rc;
+	struct ice_ring_container *tx = &q_vector->tx;
+	struct ice_ring_container *rx = &q_vector->rx;
 	u32 itr_val;

+	/* This will do nothing if dynamic updates are not enabled */
+	ice_update_itr(q_vector, tx);
+	ice_update_itr(q_vector, rx);
+
 	/* This block of logic allows us to get away with only updating
 	 * one ITR value with each interrupt. The idea is to perform a
 	 * pseudo-lazy update with the following criteria.
@@ -1080,35 +1363,36 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
 	 * 2. If we must reduce an ITR that is given highest priority.
 	 * 3. We then give priority to increasing ITR based on amount.
 	 */
-	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
-		rc = &q_vector->rx;
+	if (rx->target_itr < rx->current_itr) {
 		/* Rx ITR needs to be reduced, this is highest priority */
-		itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
-		rc->current_itr = rc->target_itr;
-	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
-		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
-		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
-		rc = &q_vector->tx;
+		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
+		rx->current_itr = rx->target_itr;
+		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+	} else if ((tx->target_itr < tx->current_itr) ||
+		   ((rx->target_itr - rx->current_itr) <
+		    (tx->target_itr - tx->current_itr))) {
 		/* Tx ITR needs to be reduced, this is second priority
 		 * Tx ITR needs to be increased more than Rx, fourth priority
 		 */
-		itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
-		rc->current_itr = rc->target_itr;
-	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
-		rc = &q_vector->rx;
+		itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
+		tx->current_itr = tx->target_itr;
+		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+	} else if (rx->current_itr != rx->target_itr) {
 		/* Rx ITR needs to be increased, third priority */
-		itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
-		rc->current_itr = rc->target_itr;
+		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
+		rx->current_itr = rx->target_itr;
+		q_vector->itr_countdown = ITR_COUNTDOWN_START;
 	} else {
 		/* Still have to re-enable the interrupts */
 		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
+		if (q_vector->itr_countdown)
+			q_vector->itr_countdown--;
 	}

-	if (!test_bit(__ICE_DOWN, vsi->state)) {
-		int vector = vsi->hw_base_vector + q_vector->v_idx;
-
-		wr32(hw, GLINT_DYN_CTL(vector), itr_val);
-	}
+	if (!test_bit(__ICE_DOWN, vsi->state))
+		wr32(&vsi->back->hw,
+		     GLINT_DYN_CTL(q_vector->reg_idx),
+		     itr_val);
 }

 /**
@@ -1354,7 +1638,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
 	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

 	/* notify HW of packet */
-	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
 		writel(i, tx_ring->tail);
 	}

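This one-line conversion is part of the tree-wide move of skb->xmit_more into a per-cpu variable (highlight 10 of this merge). The doorbell-batching idiom a driver's xmit path now ends with reads roughly as follows; the mydrv_* names and ring struct are placeholders for illustration, not ice code:

	struct mydrv_ring {
		void __iomem *tail;	/* doorbell register (placeholder) */
		u16 next_to_use;
	};

	/* placeholders: pick a ring and fill Tx descriptors for the skb */
	extern struct mydrv_ring *mydrv_select_ring(struct net_device *netdev,
						    struct sk_buff *skb);
	extern void mydrv_post_tx_descs(struct mydrv_ring *ring,
					struct sk_buff *skb);

	static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
	{
		struct mydrv_ring *ring = mydrv_select_ring(netdev, skb);

		mydrv_post_tx_descs(ring, skb);

		/* Ring the doorbell only for the last skb of a batch; the
		 * netif_xmit_stopped() check keeps a stopped queue from
		 * holding back the tail bump indefinitely.
		 */
		if (netif_xmit_stopped(netdev_get_tx_queue(netdev, 0)) ||
		    !netdev_xmit_more())
			writel(ring->next_to_use, ring->tail);

		return NETDEV_TX_OK;
	}
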
@@ -1475,7 +1759,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 }

 /**
- * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
  * @tx_ring: ring to send buffer on
  * @first: pointer to struct ice_tx_buf
  *
@@ -1501,7 +1785,7 @@ ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
 		 * to the encapsulated ethertype.
 		 */
 		skb->protocol = vlan_get_protocol(skb);
-		goto out;
+		return 0;
 	}

 	/* if we have a HW VLAN tag being added, default to the HW one */
@@ -1523,8 +1807,7 @@ ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
 		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
 	}

-out:
-	return 0;
+	return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
 }

 /**
@@ -1561,6 +1844,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 	if (err < 0)
 		return err;

+	/* cppcheck-suppress unreadVariable */
 	ip.hdr = skb_network_header(skb);
 	l4.hdr = skb_transport_header(skb);

@@ -45,8 +45,13 @@
 #define ICE_TX_FLAGS_HW_VLAN	BIT(1)
 #define ICE_TX_FLAGS_SW_VLAN	BIT(2)
 #define ICE_TX_FLAGS_VLAN_M	0xffff0000
+#define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000
+#define ICE_TX_FLAGS_VLAN_PR_S	29
 #define ICE_TX_FLAGS_VLAN_S	16

+#define ICE_RX_DMA_ATTR \
+	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
 struct ice_tx_buf {
 	struct ice_tx_desc *next_to_watch;
 	struct sk_buff *skb;
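The two new masks carve a 3-bit 802.1p priority field out of the VLAN word kept in the upper half of tx_flags (whole tag at bits 16-31, PCP at bits 29-31). Extracting either piece is a mask-and-shift; an illustrative use, not a line from this commit:

	u16 vlan_tag  = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> ICE_TX_FLAGS_VLAN_S;
	u8  vlan_prio = (first->tx_flags & ICE_TX_FLAGS_VLAN_PR_M) >> ICE_TX_FLAGS_VLAN_PR_S;
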
@@ -73,6 +78,7 @@ struct ice_rx_buf {
 	dma_addr_t dma;
 	struct page *page;
 	unsigned int page_offset;
+	u16 pagecnt_bias;
 };

 struct ice_q_stats {
@@ -124,11 +130,19 @@ enum ice_rx_dtype {
 #define ICE_ITR_DYNAMIC	0x8000  /* used as flag for itr_setting */
 #define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
 #define ITR_TO_REG(setting)	((setting) & ~ICE_ITR_DYNAMIC)
-#define ICE_ITR_GRAN_S		1	/* Assume ITR granularity is 2us */
+#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2us */
+#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
 #define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
 #define ITR_REG_ALIGN(setting)	__ALIGN_MASK(setting, ~ICE_ITR_MASK)

+#define ICE_ITR_ADAPTIVE_MIN_INC	0x0002
+#define ICE_ITR_ADAPTIVE_MIN_USECS	0x0002
+#define ICE_ITR_ADAPTIVE_MAX_USECS	0x00FA
+#define ICE_ITR_ADAPTIVE_LATENCY	0x8000
+#define ICE_ITR_ADAPTIVE_BULK		0x0000
+
 #define ICE_DFLT_INTRL	0
+#define ICE_MAX_INTRL	236

 /* Legacy or Advanced Mode Queue */
 #define ICE_TX_ADVANCED	0
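ITR_REG_ALIGN builds on the kernel's __ALIGN_MASK(x, mask), which expands to (((x) + (mask)) & ~(mask)); passing ~ICE_ITR_MASK as the mask rounds a microsecond value up to the 2 us register granularity while confining it to the 13-bit ITR field:

	ITR_REG_ALIGN(3)  == ((3  + 0xFFFFE001) & 0x1FFE) == 4   /* odd rounds up to the next 2us step */
	ITR_REG_ALIGN(50) == ((50 + 0xFFFFE001) & 0x1FFE) == 50  /* aligned values pass through */
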
@@ -149,6 +163,9 @@ struct ice_ring {
 	};
 	u16 q_index;			/* Queue number of ring */
 	u32 txq_teid;			/* Added Tx queue TEID */
+#ifdef CONFIG_DCB
+	u8 dcb_tc;			/* Traffic class of ring */
+#endif /* CONFIG_DCB */

 	u16 count;			/* Number of descriptors */
 	u16 reg_idx;			/* HW register index of the ring */
@@ -173,21 +190,13 @@ struct ice_ring {
 	u16 next_to_alloc;
 } ____cacheline_internodealigned_in_smp;

-enum ice_latency_range {
-	ICE_LOWEST_LATENCY = 0,
-	ICE_LOW_LATENCY = 1,
-	ICE_BULK_LATENCY = 2,
-	ICE_ULTRA_LATENCY = 3,
-};
-
 struct ice_ring_container {
 	/* head of linked-list of rings */
 	struct ice_ring *ring;
+	unsigned long next_update;	/* jiffies value of next queue update */
 	unsigned int total_bytes;	/* total bytes processed this int */
 	unsigned int total_pkts;	/* total packets processed this int */
-	enum ice_latency_range latency_range;
-	int itr_idx;		/* index in the interrupt vector */
+	u16 itr_idx;		/* index in the interrupt vector */
+	u16 target_itr;		/* value in usecs divided by the hw->itr_gran */
+	u16 current_itr;	/* value in usecs divided by the hw->itr_gran */
 	/* high bit set means dynamic ITR, rest is used to store user

@@ -24,6 +24,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
 /* debug masks - set these bits in hw->debug_mask to control output */
 #define ICE_DBG_INIT		BIT_ULL(1)
 #define ICE_DBG_LINK		BIT_ULL(4)
+#define ICE_DBG_PHY		BIT_ULL(5)
 #define ICE_DBG_QCTX		BIT_ULL(6)
 #define ICE_DBG_NVM		BIT_ULL(7)
 #define ICE_DBG_LAN		BIT_ULL(8)
@@ -106,7 +107,7 @@ struct ice_link_status {
 };

 /* Different reset sources for which a disable queue AQ call has to be made in
- * order to clean the TX scheduler as a part of the reset
+ * order to clean the Tx scheduler as a part of the reset
  */
 enum ice_disq_rst_src {
 	ICE_NO_RESET = 0,
@@ -128,11 +129,11 @@ struct ice_phy_info {
 struct ice_hw_common_caps {
 	u32 valid_functions;

-	/* TX/RX queues */
-	u16 num_rxq;		/* Number/Total RX queues */
-	u16 rxq_first_id;	/* First queue ID for RX queues */
-	u16 num_txq;		/* Number/Total TX queues */
-	u16 txq_first_id;	/* First queue ID for TX queues */
+	/* Tx/Rx queues */
+	u16 num_rxq;		/* Number/Total Rx queues */
+	u16 rxq_first_id;	/* First queue ID for Rx queues */
+	u16 num_txq;		/* Number/Total Tx queues */
+	u16 txq_first_id;	/* First queue ID for Tx queues */

 	/* MSI-X vectors */
 	u16 num_msix_vectors;
@@ -147,6 +148,8 @@ struct ice_hw_common_caps {
 	/* RSS related capabilities */
 	u16 rss_table_size;		/* 512 for PFs and 64 for VFs */
 	u8 rss_table_entry_width;	/* RSS Entry width in bits */
+
+	u8 dcb;
 };

 /* Function specific capabilities */
@@ -209,12 +212,17 @@ struct ice_nvm_info {
 #define ICE_MAX_TRAFFIC_CLASS	8
 #define ICE_TXSCHED_MAX_BRANCHES	ICE_MAX_TRAFFIC_CLASS

+#define ice_for_each_traffic_class(_i) \
+	for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
+
+#define ICE_INVAL_TEID 0xFFFFFFFF
+
 struct ice_sched_node {
 	struct ice_sched_node *parent;
 	struct ice_sched_node *sibling; /* next sibling in the same layer */
 	struct ice_sched_node **children;
 	struct ice_aqc_txsched_elem_data info;
-	u32 agg_id;			/* aggregator group id */
+	u32 agg_id;			/* aggregator group ID */
 	u16 vsi_handle;
 	u8 in_use;			/* suspended or in use */
 	u8 tx_sched_layer;		/* Logical Layer (1-9) */
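The new ice_for_each_traffic_class iterator added in the hunk above reads like the other kernel for_each helpers; a typical (hypothetical) walk over the per-TC arrays would be:

	u8 tc;

	ice_for_each_traffic_class(tc) {
		if (!vsi_info->vsi_node[tc])	/* hypothetical caller context */
			continue;
		/* per-traffic-class work here */
	}
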
@@ -241,13 +249,12 @@ enum ice_agg_type {
 #define ICE_SCHED_DFLT_RL_PROF_ID	0
 #define ICE_SCHED_DFLT_BW_WT		1

-/* vsi type list entry to locate corresponding vsi/ag nodes */
+/* VSI type list entry to locate corresponding VSI/ag nodes */
 struct ice_sched_vsi_info {
 	struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
 	struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
 	struct list_head list_entry;
 	u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
-	u16 vsi_id;
 };

 /* driver defines the policy */
@@ -257,15 +264,70 @@ struct ice_sched_tx_policy {
 	u8 rdma_ena;
 };

+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct ice_dcb_ets_cfg {
+	u8 willing;
+	u8 cbs;
+	u8 maxtcs;
+	u8 prio_table[ICE_MAX_TRAFFIC_CLASS];
+	u8 tcbwtable[ICE_MAX_TRAFFIC_CLASS];
+	u8 tsatable[ICE_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct ice_dcb_pfc_cfg {
+	u8 willing;
+	u8 mbc;
+	u8 pfccap;
+	u8 pfcena;
+};
+
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct ice_dcb_app_priority_table {
+	u16 prot_id;
+	u8 priority;
+	u8 selector;
+};
+
+#define ICE_MAX_USER_PRIORITY	8
+#define ICE_DCBX_MAX_APPS	32
+#define ICE_LLDPDU_SIZE		1500
+#define ICE_TLV_STATUS_OPER	0x1
+#define ICE_TLV_STATUS_SYNC	0x2
+#define ICE_TLV_STATUS_ERR	0x4
+#define ICE_APP_PROT_ID_FCOE	0x8906
+#define ICE_APP_PROT_ID_ISCSI	0x0cbc
+#define ICE_APP_PROT_ID_FIP	0x8914
+#define ICE_APP_SEL_ETHTYPE	0x1
+#define ICE_APP_SEL_TCPIP	0x2
+#define ICE_CEE_APP_SEL_ETHTYPE	0x0
+#define ICE_CEE_APP_SEL_TCPIP	0x1
+
+struct ice_dcbx_cfg {
+	u32 numapps;
+	u32 tlv_status; /* CEE mode TLV status */
+	struct ice_dcb_ets_cfg etscfg;
+	struct ice_dcb_ets_cfg etsrec;
+	struct ice_dcb_pfc_cfg pfc;
+	struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
+	u8 dcbx_mode;
+#define ICE_DCBX_MODE_CEE	0x1
+#define ICE_DCBX_MODE_IEEE	0x2
+	u8 app_mode;
+#define ICE_DCBX_APPS_NON_WILLING	0x1
+};
+
 struct ice_port_info {
 	struct ice_sched_node *root;	/* Root Node per Port */
-	struct ice_hw *hw;		/* back pointer to hw instance */
+	struct ice_hw *hw;		/* back pointer to HW instance */
 	u32 last_node_teid;		/* scheduler last node info */
 	u16 sw_id;			/* Initial switch ID belongs to port */
 	u16 pf_vf_num;
 	u8 port_state;
 #define ICE_SCHED_PORT_STATE_INIT	0x0
 #define ICE_SCHED_PORT_STATE_READY	0x1
-	u8 lport;
-#define ICE_LPORT_MASK	0xff
 	u16 dflt_tx_vsi_rule_id;
 	u16 dflt_tx_vsi_num;
 	u16 dflt_rx_vsi_rule_id;
@@ -274,9 +336,14 @@ struct ice_port_info {
 	struct ice_mac_info mac;
 	struct ice_phy_info phy;
 	struct mutex sched_lock;	/* protect access to TXSched tree */
+	u8 lport;
+#define ICE_LPORT_MASK	0xff
-	u8 is_vf;
+	struct ice_dcbx_cfg local_dcbx_cfg;	/* Oper/Local Cfg */
+	/* DCBX info */
+	struct ice_dcbx_cfg remote_dcbx_cfg;	/* Peer Cfg */
+	struct ice_dcbx_cfg desired_dcbx_cfg;	/* CEE Desired Cfg */
+	/* LLDP/DCBX Status */
+	u8 dcbx_status:3;	/* see ICE_DCBX_STATUS_DIS */
+	u8 is_sw_lldp:1;
+	u8 is_vf:1;
 };

 struct ice_switch_info {
@@ -320,7 +387,7 @@ struct ice_hw {

 	u8 pf_id;		/* device profile info */

-	/* TX Scheduler values */
+	/* Tx Scheduler values */
 	u16 num_tx_sched_layers;
 	u16 num_tx_sched_phys_layers;
 	u8 flattened_layers;
@@ -331,7 +398,7 @@ struct ice_hw {

 	struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
 	u8 evb_veb;		/* true for VEB, false for VEPA */
-	u8 reset_ongoing;	/* true if hw is in reset, false otherwise */
+	u8 reset_ongoing;	/* true if HW is in reset, false otherwise */
 	struct ice_bus_info bus;
 	struct ice_nvm_info nvm;
 	struct ice_hw_dev_caps dev_caps;	/* device capabilities */
@@ -410,6 +477,11 @@ struct ice_hw_port_stats {
 	u64 link_xoff_rx;	/* lxoffrxc */
 	u64 link_xon_tx;	/* lxontxc */
 	u64 link_xoff_tx;	/* lxofftxc */
+	u64 priority_xon_rx[8];		/* pxonrxc[8] */
+	u64 priority_xoff_rx[8];	/* pxoffrxc[8] */
+	u64 priority_xon_tx[8];		/* pxontxc[8] */
+	u64 priority_xoff_tx[8];	/* pxofftxc[8] */
+	u64 priority_xon_2_xoff[8];	/* pxon2offc[8] */
 	u64 rx_size_64;		/* prc64 */
 	u64 rx_size_127;	/* prc127 */
 	u64 rx_size_255;	/* prc255 */

File diff suppressed because it is too large
@@ -48,10 +48,10 @@ enum ice_virtchnl_cap {
 struct ice_vf {
 	struct ice_pf *pf;

-	s16 vf_id;			/* VF id in the PF space */
+	s16 vf_id;			/* VF ID in the PF space */
 	u32 driver_caps;		/* reported by VF driver */
 	int first_vector_idx;		/* first vector index of this VF */
-	struct ice_sw *vf_sw_id;	/* switch id the VF VSIs connect to */
+	struct ice_sw *vf_sw_id;	/* switch ID the VF VSIs connect to */
 	struct virtchnl_version_info vf_ver;
 	struct virtchnl_ether_addr dflt_lan_addr;
 	u16 port_vlan_id;
@@ -59,10 +59,10 @@ struct ice_vf {
 	u8 trusted;
 	u16 lan_vsi_idx;		/* index into PF struct */
 	u16 lan_vsi_num;		/* ID as used by firmware */
-	u64 num_mdd_events;		/* number of mdd events detected */
+	u64 num_mdd_events;		/* number of MDD events detected */
 	u64 num_inval_msgs;		/* number of continuous invalid msgs */
 	u64 num_valid_msgs;		/* number of valid msgs detected */
-	unsigned long vf_caps;		/* vf's adv. capabilities */
+	unsigned long vf_caps;		/* VF's adv. capabilities */
 	DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS);	/* VF runtime states */
 	unsigned int tx_rate;		/* Tx bandwidth limit in Mbps */
 	u8 link_forced;
@@ -70,6 +70,7 @@ struct ice_vf {
 	u8 spoofchk;
 	u16 num_mac;
 	u16 num_vlan;
 	u16 num_vf_qs;			/* num of queue configured per VF */
+	u8 num_req_qs;			/* num of queue pairs requested by VF */
 };

@@ -77,8 +78,8 @@ struct ice_vf {
 void ice_process_vflr_event(struct ice_pf *pf);
 int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
-		   struct ifla_vf_info *ivi);
+int
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);

 void ice_free_vfs(struct ice_pf *pf);
 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
@@ -86,11 +87,9 @@ void ice_vc_notify_link_state(struct ice_pf *pf);
 void ice_vc_notify_reset(struct ice_pf *pf);
 bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);

-int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id,
-			 u16 vlan_id, u8 qos, __be16 vlan_proto);
-
-int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
-		  int max_tx_rate);
+int
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+		     __be16 vlan_proto);

 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);

@@ -162,12 +161,5 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev,
 	return -EOPNOTSUPP;
 }

-static inline int
-ice_set_vf_bw(struct net_device __always_unused *netdev,
-	      int __always_unused vf_id, int __always_unused min_tx_rate,
-	      int __always_unused max_tx_rate)
-{
-	return -EOPNOTSUPP;
-}
 #endif /* CONFIG_PCI_IOV */
 #endif /* _ICE_VIRTCHNL_PF_H_ */