net: allow fallback function to pass netdev

For most of these calls we can just pass NULL through to the fallback
function as the sb_dev. The only cases where we cannot are those where
we might be dealing with either an upper device or a driver that has
configured things to support an sb_dev itself.
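
A minimal sketch of the resulting calling convention (the fallback typedef
below reflects the three-argument form this change moves to; foo_select_queue
is a hypothetical driver callback used only for illustration, and the struct
declarations are assumed from <linux/netdevice.h>):

/* The fallback now carries the subordinate device; NULL means "none". */
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
                                       struct sk_buff *skb,
                                       struct net_device *sb_dev);

/* Hypothetical driver callback: a driver with no sb_dev handling of its
 * own simply passes NULL through to the fallback.
 */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
                            struct net_device *sb_dev,
                            select_queue_fallback_t fallback)
{
        return fallback(dev, skb, NULL);
}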

The only driver with any significant change in this patch set is ixgbe,
as we can drop the redundant functionality that existed in both the
ndo_select_queue function and the fallback function that was passed
through to us.
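
A simplified sketch of that pattern (not the verbatim ixgbe code;
bar_select_queue is a stand-in name): a driver that does care about
subordinate devices can now hand sb_dev straight to the fallback instead of
duplicating the queue lookup itself.

/* Sketch only, assuming declarations from <linux/netdevice.h>. */
static u16 bar_select_queue(struct net_device *dev, struct sk_buff *skb,
                            struct net_device *sb_dev,
                            select_queue_fallback_t fallback)
{
        /* driver-specific special cases (e.g. FCoE traffic) would be
         * handled here before falling back
         */
        return fallback(dev, skb, sb_dev);
}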

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Author:     Alexander Duyck
AuthorDate: 2018-07-09 12:20:04 -04:00
Committer:  Jeff Kirsher
parent 4f49dec907
commit 8ec56fc3c5
14 changed files with 24 additions and 27 deletions


@@ -3633,8 +3633,8 @@ u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
 
-static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
-                             struct net_device *sb_dev)
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+                            struct net_device *sb_dev)
 {
         struct sock *sk = skb->sk;
         int queue_index = sk_tx_queue_get(sk);
@@ -3659,12 +3659,6 @@ static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
         return queue_index;
 }
 
-static u16 __netdev_pick_tx(struct net_device *dev,
-                            struct sk_buff *skb)
-{
-        return ___netdev_pick_tx(dev, skb, NULL);
-}
-
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                                     struct sk_buff *skb,
                                     struct net_device *sb_dev)
@@ -3685,7 +3679,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                         queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
                                                             __netdev_pick_tx);
                 else
-                        queue_index = ___netdev_pick_tx(dev, skb, sb_dev);
+                        queue_index = __netdev_pick_tx(dev, skb, sb_dev);
 
                 queue_index = netdev_cap_txqueue(dev, queue_index);
         }