Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
@@ -12249,6 +12249,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 			   NETIF_F_GSO_GRE		|
 			   NETIF_F_GSO_GRE_CSUM		|
 			   NETIF_F_GSO_PARTIAL		|
+			   NETIF_F_GSO_IPXIP4		|
+			   NETIF_F_GSO_IPXIP6		|
 			   NETIF_F_GSO_UDP_TUNNEL	|
 			   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
 			   NETIF_F_SCTP_CRC		|
@@ -12266,13 +12268,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 	/* record features VLANs can make use of */
 	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

-	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
-		netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
-
 	hw_features = hw_enc_features		|
 		      NETIF_F_HW_VLAN_CTAG_TX	|
 		      NETIF_F_HW_VLAN_CTAG_RX;

+	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+		hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
+
 	netdev->hw_features |= hw_features;

 	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -76,6 +76,8 @@ extern const char ice_drv_ver[];
 #define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
 #define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)

+#define ICE_MAX_RESET_WAIT 20
+
 #define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)

 #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
@@ -189,7 +191,6 @@ struct ice_vsi {
 	u64 tx_linearize;
 	DECLARE_BITMAP(state, __ICE_STATE_NBITS);
 	DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	unsigned int current_netdev_flags;
 	u32 tx_restart;
 	u32 tx_busy;
@@ -369,5 +370,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+void ice_napi_del(struct ice_vsi *vsi);

 #endif /* _ICE_H_ */
@@ -811,6 +811,9 @@ void ice_deinit_hw(struct ice_hw *hw)
 	/* Attempt to disable FW logging before shutting down control queues */
 	ice_cfg_fw_log(hw, false);
 	ice_shutdown_all_ctrlq(hw);
+
+	/* Clear VSI contexts if not already cleared */
+	ice_clear_all_vsi_ctx(hw);
 }

 /**
@@ -1517,10 +1517,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
 	}

 	if (!test_bit(__ICE_DOWN, pf->state)) {
-		/* Give it a little more time to try to come back */
+		/* Give it a little more time to try to come back. If still
+		 * down, restart autoneg link or reinitialize the interface.
+		 */
 		msleep(75);
 		if (!test_bit(__ICE_DOWN, pf->state))
 			return ice_nway_reset(netdev);
+
+		ice_down(vsi);
+		ice_up(vsi);
 	}

 	return err;
@@ -242,6 +242,8 @@
 #define GLNVM_ULD 0x000B6008
 #define GLNVM_ULD_CORER_DONE_M BIT(3)
 #define GLNVM_ULD_GLOBR_DONE_M BIT(4)
+#define GLPCI_CNF2 0x000BE004
+#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
 #define PF_FUNC_RID 0x0009E880
 #define PF_FUNC_RID_FUNC_NUM_S 0
 #define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0)
@@ -1997,7 +1997,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
 	status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
 	if (status) {
 		netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
-			   ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status,
+			   ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
 			   vsi->back->hw.adminq.sq_last_status);
 		goto err_out;
 	}
@@ -2458,6 +2458,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
 	 * on this wq
 	 */
 	if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+		ice_napi_del(vsi);
 		unregister_netdev(vsi->netdev);
 		free_netdev(vsi->netdev);
 		vsi->netdev = NULL;
@@ -1465,7 +1465,7 @@ skip_req_irq:
  * ice_napi_del - Remove NAPI handler for the VSI
  * @vsi: VSI for which NAPI handler is to be removed
  */
-static void ice_napi_del(struct ice_vsi *vsi)
+void ice_napi_del(struct ice_vsi *vsi)
 {
 	int v_idx;

@@ -1622,7 +1622,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *vsi = np->vsi;
-	int ret;

 	if (vid >= VLAN_N_VID) {
 		netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -1635,7 +1634,8 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,

 	/* Enable VLAN pruning when VLAN 0 is added */
 	if (unlikely(!vid)) {
-		ret = ice_cfg_vlan_pruning(vsi, true);
+		int ret = ice_cfg_vlan_pruning(vsi, true);
+
 		if (ret)
 			return ret;
 	}
@@ -1644,12 +1644,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
 	 * needed to continue allowing all untagged packets since VLAN prune
 	 * list is applied to all packets by the switch
 	 */
-	ret = ice_vsi_add_vlan(vsi, vid);
-
-	if (!ret)
-		set_bit(vid, vsi->active_vlans);
-
-	return ret;
+	return ice_vsi_add_vlan(vsi, vid);
 }

 /**
@@ -1677,8 +1672,6 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
 	if (status)
 		return status;

-	clear_bit(vid, vsi->active_vlans);
-
 	/* Disable VLAN pruning when VLAN 0 is removed */
 	if (unlikely(!vid))
 		status = ice_cfg_vlan_pruning(vsi, false);
@@ -2001,6 +1994,22 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
 	return 0;
 }

+/**
+ * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
+ * @pf: pointer to the PF structure
+ *
+ * There is no error returned here because the driver should be able to handle
+ * 128 Byte cache lines, so we only print a warning in case issues are seen,
+ * specifically with Tx.
+ */
+static void ice_verify_cacheline_size(struct ice_pf *pf)
+{
+	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
+		dev_warn(&pf->pdev->dev,
+			 "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
+			 ICE_CACHE_LINE_BYTES);
+}
+
 /**
  * ice_probe - Device initialization routine
  * @pdev: PCI device information struct
@@ -2151,6 +2160,8 @@ static int ice_probe(struct pci_dev *pdev,
 	/* since everything is good, start the service timer */
 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

+	ice_verify_cacheline_size(pf);
+
 	return 0;

 err_alloc_sw_unroll:
@@ -2182,6 +2193,12 @@ static void ice_remove(struct pci_dev *pdev)
 	if (!pf)
 		return;

+	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
+		if (!ice_is_reset_in_progress(pf->state))
+			break;
+		msleep(100);
+	}
+
 	set_bit(__ICE_DOWN, pf->state);
 	ice_service_task_stop(pf);

@@ -2509,31 +2526,6 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
 	return ret;
 }

-/**
- * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
- * @vsi: the VSI being brought back up
- */
-static int ice_restore_vlan(struct ice_vsi *vsi)
-{
-	int err;
-	u16 vid;
-
-	if (!vsi->netdev)
-		return -EINVAL;
-
-	err = ice_vsi_vlan_setup(vsi);
-	if (err)
-		return err;
-
-	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
-		err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
 /**
  * ice_vsi_cfg - Setup the VSI
  * @vsi: the VSI being configured
@@ -2546,7 +2538,9 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)

 	if (vsi->netdev) {
 		ice_set_rx_mode(vsi->netdev);
-		err = ice_restore_vlan(vsi);
+
+		err = ice_vsi_vlan_setup(vsi);
+
 		if (err)
 			return err;
 	}
@@ -3296,7 +3290,7 @@ static void ice_rebuild(struct ice_pf *pf)
 	struct device *dev = &pf->pdev->dev;
 	struct ice_hw *hw = &pf->hw;
 	enum ice_status ret;
-	int err;
+	int err, i;

 	if (test_bit(__ICE_DOWN, pf->state))
 		goto clear_recovery;
@@ -3370,6 +3364,22 @@ static void ice_rebuild(struct ice_pf *pf)
 	}

 	ice_reset_all_vfs(pf, true);
+
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
+		bool link_up;
+
+		if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
+			continue;
+		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
+		if (link_up) {
+			netif_carrier_on(pf->vsi[i]->netdev);
+			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
+		} else {
+			netif_carrier_off(pf->vsi[i]->netdev);
+			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
+		}
+	}
+
 	/* if we get here, reset flow is successful */
 	clear_bit(__ICE_RESET_FAILED, pf->state);
 	return;
@@ -347,6 +347,18 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
 	}
 }

+/**
+ * ice_clear_all_vsi_ctx - clear all the VSI context entries
+ * @hw: pointer to the hw struct
+ */
+void ice_clear_all_vsi_ctx(struct ice_hw *hw)
+{
+	u16 i;
+
+	for (i = 0; i < ICE_MAX_VSI; i++)
+		ice_clear_vsi_ctx(hw, i);
+}
+
 /**
  * ice_add_vsi - add VSI context to the hardware and VSI handle list
  * @hw: pointer to the hw struct
@@ -190,6 +190,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
 	       struct ice_sq_cd *cd);
 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
+void ice_clear_all_vsi_ctx(struct ice_hw *hw);
 /* Switch config */
 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);

 /* Switch/bridge related commands */
@@ -1520,7 +1520,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)

 	/* update gso_segs and bytecount */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
-	first->bytecount = (first->gso_segs - 1) * off->header_len;
+	first->bytecount += (first->gso_segs - 1) * off->header_len;

 	cd_tso_len = skb->len - off->header_len;
 	cd_mss = skb_shinfo(skb)->gso_size;
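For context on the bytecount change above: with TSO the hardware repeats the packet headers for every segment after the first, so the bytes that actually reach the wire are skb->len plus (gso_segs - 1) * header_len, and the driver seeds first->bytecount from the skb length earlier in the transmit path, which is why the fix accumulates with "+=". A minimal user-space sketch with illustrative numbers (the lengths and segment count below are assumptions, not values from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int hdr_len = 66;		/* e.g. Ethernet + IPv4 + TCP */
	unsigned int gso_segs = 45;		/* 65160 bytes of payload at MSS 1448 */
	unsigned int skb_len = hdr_len + 45 * 1448;
	unsigned int bytecount = skb_len;	/* seeded from the skb length in the xmit path */

	/* each segment after the first repeats the headers on the wire */
	bytecount += (gso_segs - 1) * hdr_len;
	printf("bytes reported for BQL accounting: %u\n", bytecount);	/* 68130 */
	return 0;
}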
@@ -1556,15 +1556,15 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 	 * magnitude greater than our largest possible GSO size.
 	 *
 	 * This would then be implemented as:
-	 * return (((size >> 12) * 85) >> 8) + 1;
+	 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 	 *
 	 * Since multiplication and division are commutative, we can reorder
 	 * operations into:
-	 * return ((size * 85) >> 20) + 1;
+	 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 	 */
 static unsigned int ice_txd_use_count(unsigned int size)
 {
-	return ((size * 85) >> 20) + 1;
+	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 }

 /**
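A quick way to sanity-check the (size * 85) >> 20 approximation of dividing by roughly 12K is to evaluate the helper for a few data sizes. This is a standalone user-space sketch, with ICE_DESCS_FOR_SKB_DATA_PTR assumed to be 1 as defined in the ice_txrx.h hunk further below:

#include <assert.h>

#define ICE_DESCS_FOR_SKB_DATA_PTR	1

static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}

int main(void)
{
	assert(ice_txd_use_count(4096) == 1);	/* (4096 * 85) >> 20 == 0 */
	assert(ice_txd_use_count(12288) == 1);	/* ~12K still maps to one descriptor */
	assert(ice_txd_use_count(65536) == 6);	/* 64K / 12K = 5.33 -> 6 descriptors */
	return 0;
}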
@@ -1706,7 +1706,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
 	 * + 1 desc for context descriptor,
 	 * otherwise try next time
 	 */
-	if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
+			      ICE_DESCS_FOR_CTX_DESC)) {
 		tx_ring->tx_stats.tx_busy++;
 		return NETDEV_TX_BUSY;
 	}
@@ -22,8 +22,21 @@
 #define ICE_RX_BUF_WRITE	16	/* Must be power of 2 */
 #define ICE_MAX_TXQ_PER_TXQG	128

-/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+/* We are assuming that the cache line is always 64 Bytes here for ice.
+ * In order to make sure that is a correct assumption there is a check in probe
+ * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
+ * size is 128 bytes. We do it this way because we do not want to read the
+ * GLPCI_CNF2 register or a variable containing the value on every pass through
+ * the Tx path.
+ */
+#define ICE_CACHE_LINE_BYTES		64
+#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
+					 sizeof(struct ice_tx_desc))
+#define ICE_DESCS_FOR_CTX_DESC		1
+#define ICE_DESCS_FOR_SKB_DATA_PTR	1
+/* Tx descriptors needed, worst case */
+#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
+		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
 #define ICE_DESC_UNUSED(R)	\
 	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
 	(R)->next_to_clean - (R)->next_to_use - 1)
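The new DESC_NEEDED value can be spelled out under two assumptions that are not stated in the hunk itself: struct ice_tx_desc is two 64-bit words (16 bytes) and MAX_SKB_FRAGS is 17. Then 64 / 16 gives 4 descriptors per cache line, and the worst case is 17 fragments + 1 context descriptor + one cache line worth of descriptors of slack + 1 descriptor for the skb head, i.e. 23, which also matches the old "count + 4 + 1" replaced in ice_xmit_frame_ring above. A user-space sketch of that arithmetic:

#include <stdio.h>
#include <stdint.h>

struct ice_tx_desc {			/* assumed layout: two 64-bit words */
	uint64_t buf_addr;
	uint64_t cmd_type_offset_bsz;
};

#define MAX_SKB_FRAGS			17	/* assumed typical value */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)

int main(void)
{
	printf("descriptors per cache line: %zu\n", ICE_DESCS_PER_CACHE_LINE);	/* 4 */
	printf("DESC_NEEDED worst case:     %zu\n", (size_t)DESC_NEEDED);	/* 23 */
	return 0;
}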
@@ -92,12 +92,12 @@ struct ice_link_status {
 	u64 phy_type_low;
 	u16 max_frame_size;
 	u16 link_speed;
+	u16 req_speeds;
 	u8 lse_ena;	/* Link Status Event notification */
 	u8 link_info;
 	u8 an_info;
 	u8 ext_info;
 	u8 pacing;
-	u8 req_speeds;
 	/* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
 	 * ice_aqc_get_phy_caps structure
 	 */
@@ -348,7 +348,7 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
 	struct ice_vsi_ctx ctxt = { 0 };
 	enum ice_status status;

-	ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
+	ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
 			       ICE_AQ_VSI_PVLAN_INSERT_PVID |
 			       ICE_AQ_VSI_VLAN_EMOD_STR;
 	ctxt.info.pvid = cpu_to_le16(vid);
@@ -2171,7 +2171,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)

 			if (!ice_vsi_add_vlan(vsi, vid)) {
 				vf->num_vlan++;
-				set_bit(vid, vsi->active_vlans);

 				/* Enable VLAN pruning when VLAN 0 is added */
 				if (unlikely(!vid))
@@ -2190,7 +2189,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 			 */
 			if (!ice_vsi_kill_vlan(vsi, vid)) {
 				vf->num_vlan--;
-				clear_bit(vid, vsi->active_vlans);

 				/* Disable VLAN pruning when removing VLAN 0 */
 				if (unlikely(!vid))
@@ -53,13 +53,15 @@
  * 2^40 * 10^-9 / 60 = 18.3 minutes.
  *
  * SYSTIM is converted to real time using a timecounter. As
- * timecounter_cyc2time() allows old timestamps, the timecounter
- * needs to be updated at least once per half of the SYSTIM interval.
- * Scheduling of delayed work is not very accurate, so we aim for 8
- * minutes to be sure the actual interval is shorter than 9.16 minutes.
+ * timecounter_cyc2time() allows old timestamps, the timecounter needs
+ * to be updated at least once per half of the SYSTIM interval.
+ * Scheduling of delayed work is not very accurate, and also the NIC
+ * clock can be adjusted to run up to 6% faster and the system clock
+ * up to 10% slower, so we aim for 6 minutes to be sure the actual
+ * interval in the NIC time is shorter than 9.16 minutes.
  */

-#define IGB_SYSTIM_OVERFLOW_PERIOD	(HZ * 60 * 8)
+#define IGB_SYSTIM_OVERFLOW_PERIOD	(HZ * 60 * 6)
 #define IGB_PTP_TX_TIMEOUT		(HZ * 15)
 #define INCPERIOD_82576			BIT(E1000_TIMINCA_16NS_SHIFT)
 #define INCVALUE_82576_MASK		GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
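The numbers in the updated comment can be checked directly: 2^40 ns is about 18.33 minutes, so half the SYSTIM interval is about 9.16 minutes, and a period scheduled on a system clock running up to 10% slow but measured by a NIC clock running up to 6% fast stretches by roughly 1/0.9 * 1.06. Combining the two bounds multiplicatively is this sketch's own reading of the worst case, not something the patch states:

#include <stdio.h>

int main(void)
{
	double wrap_min = (double)(1ULL << 40) * 1e-9 / 60.0;	/* ~18.33 min */
	double half_min = wrap_min / 2.0;			/* ~9.16 min */
	double stretch = 1.0 / 0.9 * 1.06;			/* slow scheduler, fast NIC */

	printf("half of SYSTIM wrap: %.2f min\n", half_min);
	printf("old 8 min period -> up to %.2f min of NIC time\n", 8.0 * stretch);	/* > 9.16 */
	printf("new 6 min period -> up to %.2f min of NIC time\n", 6.0 * stretch);	/* < 9.16 */
	return 0;
}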