net: Fix data-races around netdev_max_backlog.

[ Upstream commit 5dcd08cd19912892586c6082d56718333e2d19db ]

netdev_max_backlog can be changed concurrently while it is being read.
Thus, we need to add READ_ONCE() to its readers.

While at it, we remove the unnecessary spaces in the doc.

Fixes: 1da177e4c3 ("Linux-2.6.12-rc2")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit 3850060352 (parent b498a1b017)
Author:    Kuniyuki Iwashima <kuniyu@amazon.com>
Date:      2022-08-23 10:46:46 -07:00
Committer: Greg Kroah-Hartman

 Documentation/admin-guide/sysctl/net.rst | 2 +-
 net/core/dev.c                           | 4 ++--
 net/core/gro_cells.c                     | 2 +-
 net/xfrm/espintcp.c                      | 2 +-
 net/xfrm/xfrm_input.c                    | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)
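Every hunk below applies the same pattern: netdev_max_backlog is a plain int that a writer may change at any moment, so each reader must take a single marked load; otherwise the compiler is free to tear, fuse, or repeat the access, and one check may act on two different values. The following is a minimal, self-contained userspace sketch of that pattern, not kernel code: the READ_ONCE()/WRITE_ONCE() macros are simplified stand-ins for the kernel's definitions, and backlog_has_room() is an invented helper.

#include <stdio.h>

/* Simplified stand-ins for the kernel macros: force exactly one
 * full-width volatile load/store of the variable. */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

static int netdev_max_backlog = 1000;	/* knob, updated concurrently */

/* Reader modeled on the enqueue_to_backlog() check: take one snapshot
 * of the limit and base the whole decision on it. */
static int backlog_has_room(unsigned int qlen)
{
	return qlen <= (unsigned int)READ_ONCE(netdev_max_backlog);
}

int main(void)
{
	WRITE_ONCE(netdev_max_backlog, 2000);	/* writer side */
	printf("has room: %d\n", backlog_has_room(100));
	return 0;
}

The matching store side is not in this patch: for sysctl integers the marked write is done in the generic proc handler, fixed separately in the same data-race cleanup effort, which is why only the readers are annotated here.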

diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -271,7 +271,7 @@ poll cycle or the number of packets processed reaches netdev_budget.
 netdev_max_backlog
 ------------------
 
-Maximum number  of  packets, queued  on  the  INPUT  side, when the interface
+Maximum number of packets, queued on the INPUT side, when the interface
 receives packets faster than kernel can process them.
 
 netdev_rss_key

diff --git a/net/core/dev.c b/net/core/dev.c
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4516,7 +4516,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
 	struct softnet_data *sd;
 	unsigned int old_flow, new_flow;
 
-	if (qlen < (netdev_max_backlog >> 1))
+	if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
 		return false;
 
 	sd = this_cpu_ptr(&softnet_data);
@@ -4564,7 +4564,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	if (!netif_running(skb->dev))
 		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
-	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
+	if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
 		if (qlen) {
 enqueue:
 			__skb_queue_tail(&sd->input_pkt_queue, skb);

diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -26,7 +26,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 	cell = this_cpu_ptr(gcells->cells);
 
-	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+	if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
 drop:
 		atomic_long_inc(&dev->rx_dropped);
 		kfree_skb(skb);

diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -170,7 +170,7 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
 {
 	struct espintcp_ctx *ctx = espintcp_getctx(sk);
 
-	if (skb_queue_len(&ctx->out_queue) >= netdev_max_backlog)
+	if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
 		return -ENOBUFS;
 
 	__skb_queue_tail(&ctx->out_queue, skb);

diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -782,7 +782,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
 	trans = this_cpu_ptr(&xfrm_trans_tasklet);
 
-	if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
+	if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
 		return -ENOBUFS;
 
 	BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));
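For comparison, and outside the scope of the patch: on an int, READ_ONCE()/WRITE_ONCE() roughly correspond to relaxed C11 atomics, a single indivisible access with no ordering guarantees. A short illustrative sketch with invented names:

#include <stdatomic.h>

static _Atomic int max_backlog = 1000;	/* illustrative knob */

/* Reader: one indivisible load, like READ_ONCE(). */
static int over_limit(unsigned int qlen)
{
	return qlen > (unsigned int)atomic_load_explicit(&max_backlog,
							 memory_order_relaxed);
}

/* Writer: one indivisible store, like WRITE_ONCE(). */
static void set_max_backlog(int val)
{
	atomic_store_explicit(&max_backlog, val, memory_order_relaxed);
}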