xen/netfront: read response from backend only once
commit 8446066bf8c1f9f7b7412c43fbea0fb87464d75b upstream.

In order to avoid problems in case the backend is modifying a response on the
ring page while the frontend has already seen it, just read the response into
a local buffer in one go and then operate on that buffer only.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
committed by Greg Kroah-Hartman
parent 1ffb20f052
commit f5e4937098
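Background for the diff below: RING_GET_RESPONSE() hands back a pointer into the shared ring page, so every field access is a separate read of memory the backend can still write, and a malicious backend can change a value between the frontend's check and its use. RING_COPY_RESPONSE() instead snapshots the whole response into a frontend-owned buffer before any field is examined. The sketch below is illustrative only: the real macro lives in include/xen/interface/io/ring.h, the _SKETCH name is made up here, and the volatile cast is an assumption about how the one-shot copy is forced, not the kernel header's verbatim text.

	/*
	 * Illustrative sketch (not the kernel's exact definition): copy one
	 * response out of the shared ring into a caller-provided local
	 * struct so that later field accesses cannot be altered by the
	 * backend. Builds on RING_GET_RESPONSE() from io/ring.h.
	 */
	#define RING_COPY_RESPONSE_SKETCH(_r, _idx, _rsp) do {			\
		/* Use volatile to force the copy into the local buffer. */	\
		*(_rsp) = *(volatile typeof(_rsp))RING_GET_RESPONSE(_r, _idx);	\
	} while (0)

Callers then work on a stack-local copy, as the hunks below do: declare struct xen_netif_tx_response txrsp; and pass &txrsp, instead of dereferencing a pointer into the ring for each field.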
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -399,13 +399,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 		rmb(); /* Ensure we see responses up to 'rp'. */
 
 		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
-			struct xen_netif_tx_response *txrsp;
+			struct xen_netif_tx_response txrsp;
 
-			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
-			if (txrsp->status == XEN_NETIF_RSP_NULL)
+			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
+			if (txrsp.status == XEN_NETIF_RSP_NULL)
 				continue;
 
-			id = txrsp->id;
+			id = txrsp.id;
 			skb = queue->tx_skbs[id].skb;
 			if (unlikely(gnttab_query_foreign_access(
 				queue->grant_tx_ref[id]) != 0)) {
@@ -816,7 +816,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
 			     RING_IDX rp)
 
 {
-	struct xen_netif_extra_info *extra;
+	struct xen_netif_extra_info extra;
 	struct device *dev = &queue->info->netdev->dev;
 	RING_IDX cons = queue->rx.rsp_cons;
 	int err = 0;
@@ -832,24 +832,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
 			break;
 		}
 
-		extra = (struct xen_netif_extra_info *)
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
 
-		if (unlikely(!extra->type ||
-			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+		if (unlikely(!extra.type ||
+			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			if (net_ratelimit())
 				dev_warn(dev, "Invalid extra type: %d\n",
-					extra->type);
+					extra.type);
 			err = -EINVAL;
 		} else {
-			memcpy(&extras[extra->type - 1], extra,
-			       sizeof(*extra));
+			extras[extra.type - 1] = extra;
 		}
 
 		skb = xennet_get_rx_skb(queue, cons);
 		ref = xennet_get_rx_ref(queue, cons);
 		xennet_move_rx_slot(queue, skb, ref);
-	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
 	queue->rx.rsp_cons = cons;
 	return err;
@@ -907,7 +905,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
 				      struct sk_buff_head *list,
 				      bool *need_xdp_flush)
 {
-	struct xen_netif_rx_response *rx = &rinfo->rx;
+	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
 	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
@@ -991,7 +989,8 @@ next:
 			break;
 		}
 
-		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
+		rx = &rx_local;
 		skb = xennet_get_rx_skb(queue, cons + slots);
 		ref = xennet_get_rx_ref(queue, cons + slots);
 		slots++;
@@ -1046,10 +1045,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
 	struct sk_buff *nskb;
 
 	while ((nskb = __skb_dequeue(list))) {
-		struct xen_netif_rx_response *rx =
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		struct xen_netif_rx_response rx;
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
+		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
+
 		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
@@ -1064,7 +1064,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				skb_frag_page(nfrag),
-				rx->offset, rx->status, PAGE_SIZE);
+				rx.offset, rx.status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
 		kfree_skb(nskb);
@@ -1163,7 +1163,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 	i = queue->rx.rsp_cons;
 	work_done = 0;
 	while ((i != rp) && (work_done < budget)) {
-		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
+		RING_COPY_RESPONSE(&queue->rx, i, rx);
 		memset(extras, 0, sizeof(rinfo.extras));
 
 		err = xennet_get_responses(queue, &rinfo, rp, &tmpq,