rx.c
/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>

/*
 * Update the needed ring page slots for the first SKB queued.
 * Note that any call sequence outside the RX thread calling this function
 * needs to wake up the RX thread via a call to xenvif_kick_thread()
 * afterwards in order to avoid a race with putting the thread to sleep.
 */
static void xenvif_update_needed_slots(struct xenvif_queue *queue,
                                       const struct sk_buff *skb)
{
        unsigned int needed = 0;

        if (skb) {
                needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
                if (skb_is_gso(skb))
                        needed++;
                if (skb->sw_hash)
                        needed++;
        }

        WRITE_ONCE(queue->rx_slots_needed, needed);
}
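
/* Check whether the frontend has posted enough request slots on the
 * shared ring for the SKB at the head of the rx_queue.  If not, arm
 * req_event so the frontend raises an event when new requests arrive,
 * then re-check req_prod to close the race with requests posted in
 * the meantime.
 */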
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
        RING_IDX prod, cons;
        unsigned int needed;

        needed = READ_ONCE(queue->rx_slots_needed);
        if (!needed)
                return false;

        do {
                prod = queue->rx.sring->req_prod;
                cons = queue->rx.req_cons;

                if (prod - cons >= needed)
                        return true;

                queue->rx.sring->req_event = prod + 1;

                /* Make sure event is visible before we check prod
                 * again.
                 */
                mb();
        } while (queue->rx.sring->req_prod != prod);

        return false;
}
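
/* Queue an SKB for transmission to the frontend.  If the internal
 * rx_queue already holds rx_queue_max bytes, the SKB is not queued,
 * the corresponding netdev tx queue is stopped and false is returned.
 */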
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
        unsigned long flags;
        bool ret = true;

        spin_lock_irqsave(&queue->rx_queue.lock, flags);

        if (queue->rx_queue_len >= queue->rx_queue_max) {
                struct net_device *dev = queue->vif->dev;

                netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
                ret = false;
        } else {
                if (skb_queue_empty(&queue->rx_queue))
                        xenvif_update_needed_slots(queue, skb);

                __skb_queue_tail(&queue->rx_queue, skb);

                queue->rx_queue_len += skb->len;
        }

        spin_unlock_irqrestore(&queue->rx_queue.lock, flags);

        return ret;
}
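
/* Remove the SKB at the head of the rx_queue, update the slot estimate
 * for the new head and wake the netdev tx queue again once the queued
 * bytes drop below rx_queue_max.
 */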
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        spin_lock_irq(&queue->rx_queue.lock);

        skb = __skb_dequeue(&queue->rx_queue);
        if (skb) {
                xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));

                queue->rx_queue_len -= skb->len;
                if (queue->rx_queue_len < queue->rx_queue_max) {
                        struct netdev_queue *txq;

                        txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
                        netif_tx_wake_queue(txq);
                }
        }

        spin_unlock_irq(&queue->rx_queue.lock);

        return skb;
}

static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        while ((skb = xenvif_rx_dequeue(queue)) != NULL)
                kfree_skb(skb);
}

static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        for (;;) {
                skb = skb_peek(&queue->rx_queue);
                if (!skb)
                        break;
                if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
                        break;
                xenvif_rx_dequeue(queue);
                kfree_skb(skb);
                queue->vif->dev->stats.rx_dropped++;
        }
}
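
/* Issue the batched grant copy operations, patch the status of any
 * response whose copy failed, push the responses to the frontend
 * (raising an event if needed) and free the completed SKBs.
 */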
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
        unsigned int i;
        int notify;

        gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

        for (i = 0; i < queue->rx_copy.num; i++) {
                struct gnttab_copy *op;

                op = &queue->rx_copy.op[i];

                /* If the copy failed, overwrite the status field in
                 * the corresponding response.
                 */
                if (unlikely(op->status != GNTST_okay)) {
                        struct xen_netif_rx_response *rsp;

                        rsp = RING_GET_RESPONSE(&queue->rx,
                                                queue->rx_copy.idx[i]);
                        rsp->status = op->status;
                }
        }

        queue->rx_copy.num = 0;

        /* Push responses for all completed packets. */
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
        if (notify)
                notify_remote_via_irq(queue->rx_irq);

        __skb_queue_purge(queue->rx_copy.completed);
}
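
/* Add one grant copy operation to the batch, copying 'len' bytes of
 * packet data into the guest buffer granted by 'req' at 'offset'.
 * Source pages foreign to this domain are referenced by their grant,
 * local pages by their frame number.  A full batch is flushed first.
 */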
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
                               struct xen_netif_rx_request *req,
                               unsigned int offset, void *data, size_t len)
{
        struct gnttab_copy *op;
        struct page *page;
        struct xen_page_foreign *foreign;

        if (queue->rx_copy.num == COPY_BATCH_SIZE)
                xenvif_rx_copy_flush(queue);

        op = &queue->rx_copy.op[queue->rx_copy.num];

        page = virt_to_page(data);

        op->flags = GNTCOPY_dest_gref;

        foreign = xen_page_foreign(page);
        if (foreign) {
                op->source.domid = foreign->domid;
                op->source.u.ref = foreign->gref;
                op->flags |= GNTCOPY_source_gref;
        } else {
                op->source.u.gmfn = virt_to_gfn(data);
                op->source.domid = DOMID_SELF;
        }

        op->source.offset = xen_offset_in_page(data);
        op->dest.u.ref = req->gref;
        op->dest.domid = queue->vif->domid;
        op->dest.offset = offset;
        op->len = len;

        queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
        queue->rx_copy.num++;
}

static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
        if (skb_is_gso(skb)) {
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        return XEN_NETIF_GSO_TYPE_TCPV4;
                else
                        return XEN_NETIF_GSO_TYPE_TCPV6;
        }
        return XEN_NETIF_GSO_TYPE_NONE;
}
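
/* State for transmitting one packet to the frontend: the SKB being
 * sent, the (frag_iter, frag, frag_offset) cursor into its data, any
 * extra info segments still to be sent and the number of ring slots
 * used so far.
 */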
struct xenvif_pkt_state {
        struct sk_buff *skb;
        size_t remaining_len;
        struct sk_buff *frag_iter;
        int frag; /* frag == -1 => frag_iter->head */
        unsigned int frag_offset;
        struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
        unsigned int extra_count;
        unsigned int slot;
};
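
/* Dequeue the next SKB and initialise the packet state, including the
 * GSO, XDP headroom and hash extra info segments the frontend should
 * receive ahead of the packet data.
 */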
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
                               struct xenvif_pkt_state *pkt)
{
        struct sk_buff *skb;
        unsigned int gso_type;

        skb = xenvif_rx_dequeue(queue);

        queue->stats.tx_bytes += skb->len;
        queue->stats.tx_packets++;

        /* Reset packet state. */
        memset(pkt, 0, sizeof(struct xenvif_pkt_state));

        pkt->skb = skb;
        pkt->frag_iter = skb;
        pkt->remaining_len = skb->len;
        pkt->frag = -1;

        gso_type = xenvif_gso_type(skb);
        if ((1 << gso_type) & queue->vif->gso_mask) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

                extra->u.gso.type = gso_type;
                extra->u.gso.size = skb_shinfo(skb)->gso_size;
                extra->u.gso.pad = 0;
                extra->u.gso.features = 0;
                extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
                extra->flags = 0;

                pkt->extra_count++;
        }

        if (queue->vif->xdp_headroom) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];

                memset(extra, 0, sizeof(struct xen_netif_extra_info));
                extra->u.xdp.headroom = queue->vif->xdp_headroom;
                extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
                extra->flags = 0;

                pkt->extra_count++;
        }

        if (skb->sw_hash) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

                extra->u.hash.algorithm =
                        XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

                if (skb->l4_hash)
                        extra->u.hash.type =
                                skb->protocol == htons(ETH_P_IP) ?
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
                else
                        extra->u.hash.type =
                                skb->protocol == htons(ETH_P_IP) ?
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV6;

                *(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

                extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
                extra->flags = 0;

                pkt->extra_count++;
        }
}

static void xenvif_rx_complete(struct xenvif_queue *queue,
                               struct xenvif_pkt_state *pkt)
{
        /* All responses are ready to be pushed. */
        queue->rx.rsp_prod_pvt = queue->rx.req_cons;

        __skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}

static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{
        struct sk_buff *frag_iter = pkt->frag_iter;
        unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;

        pkt->frag++;
        pkt->frag_offset = 0;

        if (pkt->frag >= nr_frags) {
                if (frag_iter == pkt->skb)
                        pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
                else
                        pkt->frag_iter = frag_iter->next;

                pkt->frag = -1;
        }
}
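
/* Return the next contiguous chunk of packet data, limited to whatever
 * is left of the current frag, of the destination page starting at
 * 'offset', and of the source page containing the data.
 */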
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
                                 struct xenvif_pkt_state *pkt,
                                 unsigned int offset, void **data,
                                 size_t *len)
{
        struct sk_buff *frag_iter = pkt->frag_iter;
        void *frag_data;
        size_t frag_len, chunk_len;

        BUG_ON(!frag_iter);

        if (pkt->frag == -1) {
                frag_data = frag_iter->data;
                frag_len = skb_headlen(frag_iter);
        } else {
                skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];

                frag_data = skb_frag_address(frag);
                frag_len = skb_frag_size(frag);
        }

        frag_data += pkt->frag_offset;
        frag_len -= pkt->frag_offset;

        chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
        chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
                                             xen_offset_in_page(frag_data));

        pkt->frag_offset += chunk_len;

        /* Advance to next frag? */
        if (frag_len == chunk_len)
                xenvif_rx_next_frag(pkt);

        *data = frag_data;
        *len = chunk_len;
}
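
/* Fill one data slot: copy chunks into the guest buffer until the page
 * is full or the packet is exhausted, then build the response for the
 * slot.  Only the first slot carries the checksum and extra-info
 * flags; XEN_NETRXF_more_data marks a packet that continues in a
 * later slot.
 */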
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
                                struct xenvif_pkt_state *pkt,
                                struct xen_netif_rx_request *req,
                                struct xen_netif_rx_response *rsp)
{
        unsigned int offset = queue->vif->xdp_headroom;
        unsigned int flags;

        do {
                size_t len;
                void *data;

                xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
                xenvif_rx_copy_add(queue, req, offset, data, len);

                offset += len;
                pkt->remaining_len -= len;

        } while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

        if (pkt->remaining_len > 0)
                flags = XEN_NETRXF_more_data;
        else
                flags = 0;

        if (pkt->slot == 0) {
                struct sk_buff *skb = pkt->skb;

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        flags |= XEN_NETRXF_csum_blank |
                                 XEN_NETRXF_data_validated;
                else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                        flags |= XEN_NETRXF_data_validated;

                if (pkt->extra_count != 0)
                        flags |= XEN_NETRXF_extra_info;
        }

        rsp->offset = 0;
        rsp->flags = flags;
        rsp->id = req->id;
        rsp->status = (s16)offset;
}
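
/* Use one ring slot to carry the next pending extra info segment,
 * reusing the response slot as a xen_netif_extra_info.  The MORE flag
 * is set while further extras remain.
 */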
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
                                 struct xenvif_pkt_state *pkt,
                                 struct xen_netif_rx_request *req,
                                 struct xen_netif_rx_response *rsp)
{
        struct xen_netif_extra_info *extra = (void *)rsp;
        unsigned int i;

        pkt->extra_count--;

        for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
                if (pkt->extras[i].type) {
                        *extra = pkt->extras[i];

                        if (pkt->extra_count != 0)
                                extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

                        pkt->extras[i].type = 0;

                        return;
                }
        }

        BUG();
}
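
/* Transmit one SKB to the frontend: consume one ring request per slot,
 * emitting the first data slot, then any extras, then the remaining
 * data slots, and finally mark the packet complete.
 */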
static void xenvif_rx_skb(struct xenvif_queue *queue)
{
        struct xenvif_pkt_state pkt;

        xenvif_rx_next_skb(queue, &pkt);

        queue->last_rx_time = jiffies;

        do {
                struct xen_netif_rx_request *req;
                struct xen_netif_rx_response *rsp;

                req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
                rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

                /* Extras must go after the first data slot */
                if (pkt.slot != 0 && pkt.extra_count != 0)
                        xenvif_rx_extra_slot(queue, &pkt, req, rsp);
                else
                        xenvif_rx_data_slot(queue, &pkt, req, rsp);

                queue->rx.req_cons++;
                pkt.slot++;
        } while (pkt.remaining_len > 0 || pkt.extra_count != 0);

        xenvif_rx_complete(queue, &pkt);
}
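
/* Transmit queued SKBs to the frontend while ring slots are available,
 * up to RX_BATCH_SIZE packets per invocation, then flush the grant
 * copy batch and complete the SKBs.
 */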
#define RX_BATCH_SIZE 64

static void xenvif_rx_action(struct xenvif_queue *queue)
{
        struct sk_buff_head completed_skbs;
        unsigned int work_done = 0;

        __skb_queue_head_init(&completed_skbs);
        queue->rx_copy.completed = &completed_skbs;

        while (xenvif_rx_ring_slots_available(queue) &&
               !skb_queue_empty(&queue->rx_queue) &&
               work_done < RX_BATCH_SIZE) {
                xenvif_rx_skb(queue);
                work_done++;
        }

        /* Flush any pending copies and complete all skbs. */
        xenvif_rx_copy_flush(queue);
}

static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
        RING_IDX prod, cons;

        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;

        return prod - cons;
}
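
/* A queue is considered stalled if the frontend has not posted enough
 * rx requests for the packet at the head of the queue within
 * stall_timeout; it becomes ready again once enough requests appear.
 */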
static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
{
        unsigned int needed = READ_ONCE(queue->rx_slots_needed);

        return !queue->stalled &&
                xenvif_rx_queue_slots(queue) < needed &&
                time_after(jiffies,
                           queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
        unsigned int needed = READ_ONCE(queue->rx_slots_needed);

        return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
}
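
/* Return true if the RX kthread has anything to do: ring slots for the
 * next packet, a stall state change to handle, a pending kthread stop,
 * or a disabled (rogue) frontend.
 */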
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
        return xenvif_rx_ring_slots_available(queue) ||
                (queue->vif->stall_timeout &&
                 (xenvif_rx_queue_stalled(queue) ||
                  xenvif_rx_queue_ready(queue))) ||
                (test_kthread && kthread_should_stop()) ||
                queue->vif->disabled;
}
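
/* How long the RX kthread may sleep before the SKB at the head of the
 * queue expires and must be dropped.
 */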
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
        struct sk_buff *skb;
        long timeout;

        skb = skb_peek(&queue->rx_queue);
        if (!skb)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = XENVIF_RX_CB(skb)->expires - jiffies;
        return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning).  In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
        DEFINE_WAIT(wait);

        if (xenvif_have_rx_work(queue, true))
                return;

        for (;;) {
                long ret;

                prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
                if (xenvif_have_rx_work(queue, true))
                        break;
                if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
                                        &queue->eoi_pending) &
                    (NETBK_RX_EOI | NETBK_COMMON_EOI))
                        xen_irq_lateeoi(queue->rx_irq, 0);

                ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
                if (!ret)
                        break;
        }
        finish_wait(&queue->wq, &wait);
}
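
/* Per-queue stall accounting: the carrier is dropped when the first
 * queue stalls and re-enabled only when every stalled queue has become
 * ready again.
 */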
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
        struct xenvif *vif = queue->vif;

        queue->stalled = true;

        /* At least one queue has stalled? Disable the carrier. */
        spin_lock(&vif->lock);
        if (vif->stalled_queues++ == 0) {
                netdev_info(vif->dev, "Guest Rx stalled");
                netif_carrier_off(vif->dev);
        }
        spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
        struct xenvif *vif = queue->vif;

        queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
        queue->stalled = false;

        /* All queues are ready? Enable the carrier. */
        spin_lock(&vif->lock);
        if (--vif->stalled_queues == 0) {
                netdev_info(vif->dev, "Guest Rx ready");
                netif_carrier_on(vif->dev);
        }
        spin_unlock(&vif->lock);
}
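
/* Per-queue kernel thread transmitting to the guest: waits for work,
 * handles rogue frontends (queue 0 only), pushes queued packets,
 * updates the stall state and drops expired packets until asked to
 * stop.
 */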
int xenvif_kthread_guest_rx(void *data)
{
        struct xenvif_queue *queue = data;
        struct xenvif *vif = queue->vif;

        if (!vif->stall_timeout)
                xenvif_queue_carrier_on(queue);

        for (;;) {
                xenvif_wait_for_rx_work(queue);

                if (kthread_should_stop())
                        break;

                /* This frontend is found to be rogue, disable it in
                 * kthread context.  Currently this is only set when
                 * netback finds that the frontend has sent a malformed
                 * packet, but we cannot disable the interface in
                 * softirq context so we defer it here, if this thread
                 * is associated with queue 0.
                 */
                if (unlikely(vif->disabled && queue->id == 0)) {
                        xenvif_carrier_off(vif);
                        break;
                }

                if (!skb_queue_empty(&queue->rx_queue))
                        xenvif_rx_action(queue);

                /* If the guest hasn't provided any Rx slots for a
                 * while it's probably not responsive, drop the
                 * carrier so packets are dropped earlier.
                 */
                if (vif->stall_timeout) {
                        if (xenvif_rx_queue_stalled(queue))
                                xenvif_queue_carrier_off(queue);
                        else if (xenvif_rx_queue_ready(queue))
                                xenvif_queue_carrier_on(queue);
                }

                /* Queued packets may have foreign pages from other
                 * domains.  These cannot be queued indefinitely as
                 * this would starve guests of grant refs and transmit
                 * slots.
                 */
                xenvif_rx_queue_drop_expired(queue);

                cond_resched();
        }

        /* Bin any remaining skbs */
        xenvif_rx_queue_purge(queue);

        return 0;
}