Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Minor overlapping changes in the btusb and ixgbe drivers.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -98,7 +98,7 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
 	  .reset_level = HNAE3_GLOBAL_RESET },
 	{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
 	  .reset_level = HNAE3_GLOBAL_RESET },
-	{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
+	{ .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow",
 	  .reset_level = HNAE3_GLOBAL_RESET },
 	{ .int_msk = BIT(3), .msg = "tx_buf_overflow",
 	  .reset_level = HNAE3_GLOBAL_RESET },

@@ -1984,8 +1984,11 @@ static void __ibmvnic_reset(struct work_struct *work)
 	rwi = get_next_rwi(adapter);
 	while (rwi) {
 		if (adapter->state == VNIC_REMOVING ||
-		    adapter->state == VNIC_REMOVED)
-			goto out;
+		    adapter->state == VNIC_REMOVED) {
+			kfree(rwi);
+			rc = EBUSY;
+			break;
+		}

 		if (adapter->force_reset_recovery) {
 			adapter->force_reset_recovery = false;
@@ -2011,7 +2014,7 @@ static void __ibmvnic_reset(struct work_struct *work)
 		netdev_dbg(adapter->netdev, "Reset failed\n");
 		free_all_rwi(adapter);
 	}
-out:
+
 	adapter->resetting = false;
 	if (we_lock_rtnl)
 		rtnl_unlock();

@@ -36,6 +36,7 @@
 #include <net/vxlan.h>
 #include <net/mpls.h>
 #include <net/xdp_sock.h>
+#include <net/xfrm.h>

 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -2623,7 +2624,7 @@ adjust_by_size:
 		/* 16K ints/sec to 9.2K ints/sec */
 		avg_wire_size *= 15;
 		avg_wire_size += 11452;
-	} else if (avg_wire_size <= 1980) {
+	} else if (avg_wire_size < 1968) {
 		/* 9.2K ints/sec to 8K ints/sec */
 		avg_wire_size *= 5;
 		avg_wire_size += 22420;
@@ -2656,6 +2657,8 @@ adjust_by_size:
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
 	case IXGBE_LINK_SPEED_1GB_FULL:
 	case IXGBE_LINK_SPEED_10_FULL:
+		if (avg_wire_size > 8064)
+			avg_wire_size = 8064;
 		itr += DIV_ROUND_UP(avg_wire_size,
 				    IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
 		       IXGBE_ITR_ADAPTIVE_MIN_INC;
@@ -8698,7 +8701,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 #endif /* IXGBE_FCOE */

 #ifdef CONFIG_IXGBE_IPSEC
-	if (secpath_exists(skb) &&
+	if (xfrm_offload(skb) &&
 	    !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
 		goto out_drop;
 #endif

@@ -642,19 +642,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
 bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 			    struct ixgbe_ring *tx_ring, int napi_budget)
 {
+	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
 	unsigned int total_packets = 0, total_bytes = 0;
-	u32 i = tx_ring->next_to_clean, xsk_frames = 0;
-	unsigned int budget = q_vector->tx.work_limit;
 	struct xdp_umem *umem = tx_ring->xsk_umem;
 	union ixgbe_adv_tx_desc *tx_desc;
 	struct ixgbe_tx_buffer *tx_bi;
-	bool xmit_done;
+	u32 xsk_frames = 0;

-	tx_bi = &tx_ring->tx_buffer_info[i];
-	tx_desc = IXGBE_TX_DESC(tx_ring, i);
-	i -= tx_ring->count;
+	tx_bi = &tx_ring->tx_buffer_info[ntc];
+	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);

-	do {
+	while (ntc != ntu) {
 		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
 			break;

@@ -670,22 +668,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,

 		tx_bi++;
 		tx_desc++;
-		i++;
-		if (unlikely(!i)) {
-			i -= tx_ring->count;
+		ntc++;
+		if (unlikely(ntc == tx_ring->count)) {
+			ntc = 0;
 			tx_bi = tx_ring->tx_buffer_info;
 			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
 		}

 		/* issue prefetch for next Tx descriptor */
 		prefetch(tx_desc);
-
-		/* update budget accounting */
-		budget--;
-	} while (likely(budget));
+	}

-	i += tx_ring->count;
-	tx_ring->next_to_clean = i;
+	tx_ring->next_to_clean = ntc;

 	u64_stats_update_begin(&tx_ring->syncp);
 	tx_ring->stats.bytes += total_bytes;
@@ -704,9 +698,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 		xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
 	}

-	xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
-
-	return budget > 0 && xmit_done;
+	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
 }

 int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)

@@ -30,6 +30,7 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/atomic.h>
+#include <net/xfrm.h>

 #include "ixgbevf.h"

@@ -4167,7 +4168,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
 	first->protocol = vlan_get_protocol(skb);

 #ifdef CONFIG_IXGBEVF_IPSEC
-	if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+	if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
 		goto out_drop;
 #endif
 	tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);

@@ -2240,7 +2240,7 @@ static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
 	for (i = 1; i <= dev->caps.num_ports; i++) {
 		if (mlx4_dev_port(dev, i, &port_cap)) {
 			mlx4_err(dev,
-				 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
+				 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
 		} else if ((dev->caps.dmfs_high_steer_mode !=
 			    MLX4_STEERING_DMFS_A0_DEFAULT) &&
 			   (port_cap.dmfs_optimized_state ==

@@ -232,9 +232,9 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)

 	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
 	if (!laddr) {
-		printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
-		dev_kfree_skb(skb);
-		return NETDEV_TX_BUSY;
+		pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
 	}

 	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);	/* clear status */

@@ -260,9 +260,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)

 	type = cmsg_hdr->type;
 	switch (type) {
-	case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
-		nfp_flower_cmsg_portreify_rx(app, skb);
-		break;
 	case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
 		nfp_flower_cmsg_portmod_rx(app, skb);
 		break;
@@ -328,8 +325,7 @@ nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
 	struct nfp_flower_priv *priv = app->priv;
 	struct sk_buff_head *skb_head;

-	if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
-	    type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
+	if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
 		skb_head = &priv->cmsg_skbs_high;
 	else
 		skb_head = &priv->cmsg_skbs_low;
@@ -368,6 +364,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
 	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
 		/* Acks from the NFP that the route is added - ignore. */
 		dev_consume_skb_any(skb);
+	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
+		/* Handle REIFY acks outside wq to prevent RTNL conflict. */
+		nfp_flower_cmsg_portreify_rx(app, skb);
+		dev_consume_skb_any(skb);
 	} else {
 		nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
 	}

@@ -713,6 +713,21 @@ struct nv_skb_map {
 	struct nv_skb_map *next_tx_ctx;
 };

+struct nv_txrx_stats {
+	u64 stat_rx_packets;
+	u64 stat_rx_bytes; /* not always available in HW */
+	u64 stat_rx_missed_errors;
+	u64 stat_rx_dropped;
+	u64 stat_tx_packets; /* not always available in HW */
+	u64 stat_tx_bytes;
+	u64 stat_tx_dropped;
+};
+
+#define nv_txrx_stats_inc(member) \
+		__this_cpu_inc(np->txrx_stats->member)
+#define nv_txrx_stats_add(member, count) \
+		__this_cpu_add(np->txrx_stats->member, (count))
+
 /*
  * SMP locking:
  * All hardware access under netdev_priv(dev)->lock, except the performance
@@ -797,10 +812,7 @@ struct fe_priv {

 	/* RX software stats */
 	struct u64_stats_sync swstats_rx_syncp;
-	u64 stat_rx_packets;
-	u64 stat_rx_bytes; /* not always available in HW */
-	u64 stat_rx_missed_errors;
-	u64 stat_rx_dropped;
+	struct nv_txrx_stats __percpu *txrx_stats;

 	/* media detection workaround.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -826,9 +838,6 @@ struct fe_priv {

 	/* TX software stats */
 	struct u64_stats_sync swstats_tx_syncp;
-	u64 stat_tx_packets; /* not always available in HW */
-	u64 stat_tx_bytes;
-	u64 stat_tx_dropped;

 	/* msi/msi-x fields */
 	u32 msi_flags;
@@ -1721,6 +1730,39 @@ static void nv_update_stats(struct net_device *dev)
 	}
 }

+static void nv_get_stats(int cpu, struct fe_priv *np,
+			 struct rtnl_link_stats64 *storage)
+{
+	struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
+	unsigned int syncp_start;
+	u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
+	u64 tx_packets, tx_bytes, tx_dropped;
+
+	do {
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
+		rx_packets = src->stat_rx_packets;
+		rx_bytes = src->stat_rx_bytes;
+		rx_dropped = src->stat_rx_dropped;
+		rx_missed_errors = src->stat_rx_missed_errors;
+	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
+
+	storage->rx_packets += rx_packets;
+	storage->rx_bytes += rx_bytes;
+	storage->rx_dropped += rx_dropped;
+	storage->rx_missed_errors += rx_missed_errors;
+
+	do {
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
+		tx_packets = src->stat_tx_packets;
+		tx_bytes = src->stat_tx_bytes;
+		tx_dropped = src->stat_tx_dropped;
+	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+
+	storage->tx_packets += tx_packets;
+	storage->tx_bytes += tx_bytes;
+	storage->tx_dropped += tx_dropped;
+}
+
 /*
  * nv_get_stats64: dev->ndo_get_stats64 function
  * Get latest stats value from the nic.
@@ -1733,7 +1775,7 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
 	__releases(&netdev_priv(dev)->hwstats_lock)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	unsigned int syncp_start;
+	int cpu;

 	/*
 	 * Note: because HW stats are not always available and for
@@ -1746,20 +1788,8 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
 	 */

 	/* software stats */
-	do {
-		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
-		storage->rx_packets = np->stat_rx_packets;
-		storage->rx_bytes = np->stat_rx_bytes;
-		storage->rx_dropped = np->stat_rx_dropped;
-		storage->rx_missed_errors = np->stat_rx_missed_errors;
-	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
-
-	do {
-		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
-		storage->tx_packets = np->stat_tx_packets;
-		storage->tx_bytes = np->stat_tx_bytes;
-		storage->tx_dropped = np->stat_tx_dropped;
-	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+	for_each_online_cpu(cpu)
+		nv_get_stats(cpu, np, storage);

 	/* If the nic supports hw counters then retrieve latest values */
 	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -1827,7 +1857,7 @@ static int nv_alloc_rx(struct net_device *dev)
 		} else {
 packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
-			np->stat_rx_dropped++;
+			nv_txrx_stats_inc(stat_rx_dropped);
 			u64_stats_update_end(&np->swstats_rx_syncp);
 			return 1;
 		}
@@ -1869,7 +1899,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 		} else {
 packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
-			np->stat_rx_dropped++;
+			nv_txrx_stats_inc(stat_rx_dropped);
 			u64_stats_update_end(&np->swstats_rx_syncp);
 			return 1;
 		}
@@ -2013,7 +2043,7 @@ static void nv_drain_tx(struct net_device *dev)
 		}
 		if (nv_release_txskb(np, &np->tx_skb[i])) {
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}
 		np->tx_skb[i].dma = 0;
@@ -2227,7 +2257,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			/* on DMA mapping error - drop the packet */
 			dev_kfree_skb_any(skb);
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 			return NETDEV_TX_OK;
 		}
@@ -2273,7 +2303,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_dropped++;
+				nv_txrx_stats_inc(stat_tx_dropped);
 				u64_stats_update_end(&np->swstats_tx_syncp);
 				return NETDEV_TX_OK;
 			}
@@ -2384,7 +2414,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			/* on DMA mapping error - drop the packet */
 			dev_kfree_skb_any(skb);
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 			return NETDEV_TX_OK;
 		}
@@ -2431,7 +2461,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_dropped++;
+				nv_txrx_stats_inc(stat_tx_dropped);
 				u64_stats_update_end(&np->swstats_tx_syncp);
 				return NETDEV_TX_OK;
 			}
@@ -2560,9 +2590,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
 				    && !(flags & NV_TX_RETRYCOUNT_MASK))
 					nv_legacybackoff_reseed(dev);
 			} else {
+				unsigned int len;
+
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_packets++;
-				np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+				nv_txrx_stats_inc(stat_tx_packets);
+				len = np->get_tx_ctx->skb->len;
+				nv_txrx_stats_add(stat_tx_bytes, len);
 				u64_stats_update_end(&np->swstats_tx_syncp);
 			}
 			bytes_compl += np->get_tx_ctx->skb->len;
@@ -2577,9 +2610,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
 				    && !(flags & NV_TX2_RETRYCOUNT_MASK))
 					nv_legacybackoff_reseed(dev);
 			} else {
+				unsigned int len;
+
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_packets++;
-				np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+				nv_txrx_stats_inc(stat_tx_packets);
+				len = np->get_tx_ctx->skb->len;
+				nv_txrx_stats_add(stat_tx_bytes, len);
 				u64_stats_update_end(&np->swstats_tx_syncp);
 			}
 			bytes_compl += np->get_tx_ctx->skb->len;
@@ -2627,9 +2663,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 					nv_legacybackoff_reseed(dev);
 				}
 			} else {
+				unsigned int len;
+
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_packets++;
-				np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+				nv_txrx_stats_inc(stat_tx_packets);
+				len = np->get_tx_ctx->skb->len;
+				nv_txrx_stats_add(stat_tx_bytes, len);
 				u64_stats_update_end(&np->swstats_tx_syncp);
 			}

@@ -2806,6 +2845,15 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 	}
 }

+static void rx_missing_handler(u32 flags, struct fe_priv *np)
+{
+	if (flags & NV_RX_MISSEDFRAME) {
+		u64_stats_update_begin(&np->swstats_rx_syncp);
+		nv_txrx_stats_inc(stat_rx_missed_errors);
+		u64_stats_update_end(&np->swstats_rx_syncp);
+	}
+}
+
 static int nv_rx_process(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2848,11 +2896,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
 				}
 				/* the rest are hard errors */
 				else {
-					if (flags & NV_RX_MISSEDFRAME) {
-						u64_stats_update_begin(&np->swstats_rx_syncp);
-						np->stat_rx_missed_errors++;
-						u64_stats_update_end(&np->swstats_rx_syncp);
-					}
+					rx_missing_handler(flags, np);
 					dev_kfree_skb(skb);
 					goto next_pkt;
 				}
@@ -2896,8 +2940,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		skb->protocol = eth_type_trans(skb, dev);
 		napi_gro_receive(&np->napi, skb);
 		u64_stats_update_begin(&np->swstats_rx_syncp);
-		np->stat_rx_packets++;
-		np->stat_rx_bytes += len;
+		nv_txrx_stats_inc(stat_rx_packets);
+		nv_txrx_stats_add(stat_rx_bytes, len);
 		u64_stats_update_end(&np->swstats_rx_syncp);
 next_pkt:
 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
@@ -2982,8 +3026,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 		}
 		napi_gro_receive(&np->napi, skb);
 		u64_stats_update_begin(&np->swstats_rx_syncp);
-		np->stat_rx_packets++;
-		np->stat_rx_bytes += len;
+		nv_txrx_stats_inc(stat_rx_packets);
+		nv_txrx_stats_add(stat_rx_bytes, len);
 		u64_stats_update_end(&np->swstats_rx_syncp);
 	} else {
 		dev_kfree_skb(skb);
@@ -5651,6 +5695,12 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 	SET_NETDEV_DEV(dev, &pci_dev->dev);
 	u64_stats_init(&np->swstats_rx_syncp);
 	u64_stats_init(&np->swstats_tx_syncp);
+	np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
+	if (!np->txrx_stats) {
+		pr_err("np->txrx_stats, alloc memory error.\n");
+		err = -ENOMEM;
+		goto out_alloc_percpu;
+	}

 	timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
 	timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
@@ -6060,6 +6110,8 @@ out_relreg:
 out_disable:
 	pci_disable_device(pci_dev);
 out_free:
+	free_percpu(np->txrx_stats);
+out_alloc_percpu:
 	free_netdev(dev);
 out:
 	return err;
@@ -6105,6 +6157,9 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev)
 static void nv_remove(struct pci_dev *pci_dev)
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
+	struct fe_priv *np = netdev_priv(dev);
+
+	free_percpu(np->txrx_stats);

 	unregister_netdev(dev);

@@ -873,7 +873,12 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
 	int ret;
 	u32 reg, val;

-	regmap_field_read(gmac->regmap_field, &val);
+	ret = regmap_field_read(gmac->regmap_field, &val);
+	if (ret) {
+		dev_err(priv->device, "Fail to read from regmap field.\n");
+		return ret;
+	}
+
 	reg = gmac->variant->default_syscon_value;
 	if (reg != val)
 		dev_warn(priv->device,

@@ -344,10 +344,10 @@ static void sp_bump(struct sixpack *sp, char cmd)

 	sp->dev->stats.rx_bytes += count;

-	if ((skb = dev_alloc_skb(count)) == NULL)
+	if ((skb = dev_alloc_skb(count + 1)) == NULL)
 		goto out_mem;

-	ptr = skb_put(skb, count);
+	ptr = skb_put(skb, count + 1);
 	*ptr++ = cmd;	/* KISS command */

 	memcpy(ptr, sp->cooked_buf + 1, count);

@@ -376,8 +376,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
  *   Local device    Link partner
  *   Pause AsymDir   Pause AsymDir   Result
  *     1     X         1     X       TX+RX
- *     0     1         1     1       RX
- *     1     1         0     1       TX
+ *     0     1         1     1       TX
+ *     1     1         0     1       RX
  */
 static void phylink_resolve_flow(struct phylink *pl,
 				 struct phylink_link_state *state)
@@ -398,7 +398,7 @@ static void phylink_resolve_flow(struct phylink *pl,
 			new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
 		else if (pause & MLO_PAUSE_ASYM)
 			new_pause = state->pause & MLO_PAUSE_SYM ?
-				 MLO_PAUSE_RX : MLO_PAUSE_TX;
+				 MLO_PAUSE_TX : MLO_PAUSE_RX;
 	} else {
 		new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
 	}

@@ -787,7 +787,8 @@ static void tun_detach_all(struct net_device *dev)
 }

 static int tun_attach(struct tun_struct *tun, struct file *file,
-		      bool skip_filter, bool napi, bool napi_frags)
+		      bool skip_filter, bool napi, bool napi_frags,
+		      bool publish_tun)
 {
 	struct tun_file *tfile = file->private_data;
 	struct net_device *dev = tun->dev;
@@ -870,7 +871,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
 	 * initialized tfile; otherwise we risk using half-initialized
 	 * object.
 	 */
-	rcu_assign_pointer(tfile->tun, tun);
+	if (publish_tun)
+		rcu_assign_pointer(tfile->tun, tun);
 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
 	tun->numqueues++;
 	tun_set_real_num_queues(tun);
@@ -2730,7 +2732,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)

 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
 				 ifr->ifr_flags & IFF_NAPI,
-				 ifr->ifr_flags & IFF_NAPI_FRAGS);
+				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
 		if (err < 0)
 			return err;

@@ -2829,13 +2831,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)

 		INIT_LIST_HEAD(&tun->disabled);
 		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
-				 ifr->ifr_flags & IFF_NAPI_FRAGS);
+				 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
 		if (err < 0)
 			goto err_free_flow;

 		err = register_netdevice(tun->dev);
 		if (err < 0)
 			goto err_detach;
+		/* free_netdev() won't check refcnt, to aovid race
+		 * with dev_put() we need publish tun after registration.
+		 */
+		rcu_assign_pointer(tfile->tun, tun);
 	}

 	netif_carrier_on(tun->dev);
@@ -2978,7 +2984,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 		if (ret < 0)
 			goto unlock;
 		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
-				 tun->flags & IFF_NAPI_FRAGS);
+				 tun->flags & IFF_NAPI_FRAGS, true);
 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
 		tun = rtnl_dereference(tfile->tun);
 		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)

@@ -206,7 +206,15 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
 			goto bad_desc;
 	}
 skip:
-	if (rndis && header.usb_cdc_acm_descriptor &&
+	/* Communcation class functions with bmCapabilities are not
+	 * RNDIS. But some Wireless class RNDIS functions use
+	 * bmCapabilities for their own purpose. The failsafe is
+	 * therefore applied only to Communication class RNDIS
+	 * functions. The rndis test is redundant, but a cheap
+	 * optimization.
+	 */
+	if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
+	    header.usb_cdc_acm_descriptor &&
 	    header.usb_cdc_acm_descriptor->bmCapabilities) {
 		dev_dbg(&intf->dev,
 			"ACM capabilities %02x, not really RNDIS?\n",

@@ -1331,7 +1331,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 		}
 	}

-	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
+	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
 		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
 			schedule_delayed_work(&vi->refill, 0);
 	}

@@ -1115,7 +1115,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
 	sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
 	LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

-	lmc_trace(dev, "lmc_runnin_reset_out");
+	lmc_trace(dev, "lmc_running_reset_out");
 }


@@ -127,6 +127,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
 			 "%d\n", result);
 	result = 0;
 error_cmd:
 	kfree(cmd);
+	kfree_skb(ack_skb);
 error_msg_to_dev:
 error_alloc:

@@ -1114,18 +1114,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

 	/* same thing for QuZ... */
 	if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
-		if (cfg == &iwl_ax101_cfg_qu_hr)
-			cfg = &iwl_ax101_cfg_quz_hr;
-		else if (cfg == &iwl_ax201_cfg_qu_hr)
-			cfg = &iwl_ax201_cfg_quz_hr;
-		else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
-			cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
-		else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
-			cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
-		else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
-			cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
-		else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
-			cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
+		if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
+			iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
+		else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
+			iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
+		else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
+			iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
+		else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
+			iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+		else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
+			iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
+		else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
+			iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
 	}

 #endif

@@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
 	}

 	vs_ie = (struct ieee_types_header *)vendor_ie;
+	if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
+		IEEE_MAX_IE_SIZE)
+		return -EINVAL;
 	memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
 	       vs_ie, vs_ie->len + 2);
 	le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);

@@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,

 	rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
 	if (rate_ie) {
+		if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
+			return;
 		memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
 		rate_len = rate_ie->len;
 	}
@@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
 	rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
 					   params->beacon.tail,
 					   params->beacon.tail_len);
-	if (rate_ie)
+	if (rate_ie) {
+		if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
+			return;
 		memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
+	}

 	return;
 }
@@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
 					    params->beacon.tail_len);
 	if (vendor_ie) {
 		wmm_ie = vendor_ie;
+		if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
+			return;
 		memcpy(&bss_cfg->wmm_info, wmm_ie +
 		       sizeof(struct ieee_types_header), *(wmm_ie + 1));
 		priv->wmm_enabled = 1;

@@ -59,6 +59,11 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
 		dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
 	}

+	if (is_mt7630(dev)) {
+		dev->mt76.cap.has_5ghz = false;
+		dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
+	}
+
 	if (!mt76x02_field_valid(nic_conf1 & 0xff))
 		nic_conf1 &= 0xff00;

@@ -51,6 +51,19 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
 	mt76x0e_stop_hw(dev);
 }

+static int
+mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		struct ieee80211_key_conf *key)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	if (is_mt7630(dev))
+		return -EOPNOTSUPP;
+
+	return mt76x02_set_key(hw, cmd, vif, sta, key);
+}
+
 static void
 mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	      u32 queues, bool drop)
@@ -67,7 +80,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
 	.configure_filter = mt76x02_configure_filter,
 	.bss_info_changed = mt76x02_bss_info_changed,
 	.sta_state = mt76_sta_state,
-	.set_key = mt76x02_set_key,
+	.set_key = mt76x0e_set_key,
 	.conf_tx = mt76x02_conf_tx,
 	.sw_scan_start = mt76_sw_scan,
 	.sw_scan_complete = mt76x02_sw_scan_complete,

@@ -1654,13 +1654,18 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,

 	offset = MAC_IVEIV_ENTRY(key->hw_key_idx);

-	rt2800_register_multiread(rt2x00dev, offset,
-				  &iveiv_entry, sizeof(iveiv_entry));
-	if ((crypto->cipher == CIPHER_TKIP) ||
-	    (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
-	    (crypto->cipher == CIPHER_AES))
-		iveiv_entry.iv[3] |= 0x20;
-	iveiv_entry.iv[3] |= key->keyidx << 6;
+	if (crypto->cmd == SET_KEY) {
+		rt2800_register_multiread(rt2x00dev, offset,
+					  &iveiv_entry, sizeof(iveiv_entry));
+		if ((crypto->cipher == CIPHER_TKIP) ||
+		    (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
+		    (crypto->cipher == CIPHER_AES))
+			iveiv_entry.iv[3] |= 0x20;
+		iveiv_entry.iv[3] |= key->keyidx << 6;
+	} else {
+		memset(&iveiv_entry, 0, sizeof(iveiv_entry));
+	}
+
 	rt2800_register_multiwrite(rt2x00dev, offset,
 				   &iveiv_entry, sizeof(iveiv_entry));
 }
@@ -4237,24 +4242,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
 	switch (rt2x00dev->default_ant.rx_chain_num) {
 	case 3:
 		/* Turn on tertiary LNAs */
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN,
-				   rf->channel > 14);
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN,
-				   rf->channel <= 14);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
 		/* fall-through */
 	case 2:
 		/* Turn on secondary LNAs */
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN,
-				   rf->channel > 14);
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN,
-				   rf->channel <= 14);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
 		/* fall-through */
 	case 1:
 		/* Turn on primary LNAs */
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN,
-				   rf->channel > 14);
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN,
-				   rf->channel <= 14);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
 		break;
 	}

@@ -645,7 +645,6 @@ fail_rx:
 	kfree(rsi_dev->tx_buffer);

 fail_eps:
-	kfree(rsi_dev);

 	return status;
 }