r8169: remove nr_frags argument from rtl_tx_slots_avail
[ Upstream commit 83c317d7b36bb3858cf1cb86d2635ec3f3bd6ea3 ]

The only case where nr_frags isn't MAX_SKB_FRAGS is when entering
rtl8169_start_xmit(). However, we can use MAX_SKB_FRAGS there as well,
because when the queue isn't stopped there should always be room for
MAX_SKB_FRAGS + 1 descriptors.

Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
Link: https://lore.kernel.org/r/3d1f2ad7-31d5-2cac-4f4a-394f8a3cab63@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Stable-dep-of: c71e3a5cffd5 ("r8169: Fix possible ring buffer corruption on fragmented Tx packets.")
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit 48833226fb
parent 41eeb13459
committed by Greg Kroah-Hartman
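For context, before the diff, a minimal self-contained sketch of the invariant the commit message relies on. This is not the driver source: the struct name tx_ring, the helper tx_slots_avail(), and the concrete values chosen for NUM_TX_DESC and MAX_SKB_FRAGS are assumptions for illustration only. The point it shows is that, because the queue is only started or woken while more than MAX_SKB_FRAGS slots are free, any transmit call made while the queue is awake has room for a worst-case skb (MAX_SKB_FRAGS + 1 descriptors), so the per-skb nr_frags argument is unnecessary.

#include <stdbool.h>
#include <stdio.h>

#define NUM_TX_DESC	64	/* assumed ring size, for illustration only */
#define MAX_SKB_FRAGS	17	/* assumed worst-case fragment count */

struct tx_ring {
	unsigned int cur_tx;	/* next descriptor to be filled on transmit */
	unsigned int dirty_tx;	/* next descriptor to be reclaimed on completion */
};

/* The ring indices are free-running counters, so
 * dirty_tx + NUM_TX_DESC - cur_tx is the number of unused descriptors.
 * After the patch the check is always against the worst case: a maximally
 * fragmented skb needs MAX_SKB_FRAGS + 1 descriptors.
 */
static bool tx_slots_avail(const struct tx_ring *r)
{
	unsigned int slots_avail = r->dirty_tx + NUM_TX_DESC - r->cur_tx;

	return slots_avail > MAX_SKB_FRAGS;
}

int main(void)
{
	struct tx_ring r = { .cur_tx = 100, .dirty_tx = 50 };

	/* 50 + 64 - 100 = 14 free slots: not enough for a worst-case skb,
	 * so in the driver the queue would have been stopped before
	 * reaching this state.
	 */
	printf("room for a worst-case skb: %s\n",
	       tx_slots_avail(&r) ? "yes" : "no");
	return 0;
}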
@@ -4247,13 +4247,12 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
 	return true;
 }
 
-static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
-			       unsigned int nr_frags)
+static bool rtl_tx_slots_avail(struct rtl8169_private *tp)
 {
 	unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx;
 
 	/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
-	return slots_avail > nr_frags;
+	return slots_avail > MAX_SKB_FRAGS;
 }
 
 /* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
@@ -4288,7 +4287,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	txd_first = tp->TxDescArray + entry;
 
-	if (unlikely(!rtl_tx_slots_avail(tp, frags))) {
+	if (unlikely(!rtl_tx_slots_avail(tp))) {
 		if (net_ratelimit())
 			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 		goto err_stop_0;
@@ -4333,7 +4332,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1);
 
-	stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
+	stop_queue = !rtl_tx_slots_avail(tp);
 	if (unlikely(stop_queue)) {
 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
 		 * not miss a ring update when it notices a stopped queue.
@@ -4348,7 +4347,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 		 * can't.
 		 */
 		smp_mb__after_atomic();
-		if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
+		if (rtl_tx_slots_avail(tp))
 			netif_start_queue(dev);
 		door_bell = true;
 	}
@@ -4502,10 +4501,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
 		 * ring status.
 		 */
 		smp_store_mb(tp->dirty_tx, dirty_tx);
-		if (netif_queue_stopped(dev) &&
-		    rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
+		if (netif_queue_stopped(dev) && rtl_tx_slots_avail(tp))
 			netif_wake_queue(dev);
-		}
 		/*
 		 * 8168 hack: TxPoll requests are lost when the Tx packets are
 		 * too close. Let's kick an extra TxPoll request when a burst
|