vnic_rq.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include "vnic_dev.h"
#include "vnic_rq.h"
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
	struct vnic_rq_buf *buf;
	unsigned int i, j, count = rq->ring.desc_count;
	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);

	for (i = 0; i < blks; i++) {
		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
		if (!rq->bufs[i]) {
			printk(KERN_ERR "Failed to alloc rq_bufs\n");
			return -ENOMEM;
		}
	}

	/* Point each buf at its descriptor and chain it to the next buf;
	 * the last buf wraps back to the first, closing the ring.
	 */
	for (i = 0; i < blks; i++) {
		buf = rq->bufs[i];
		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
			buf->desc = (u8 *)rq->ring.descs +
				rq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				buf->next = rq->bufs[0];
				break;
			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
				buf->next = rq->bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}

	rq->to_use = rq->to_clean = rq->bufs[0];
	rq->buf_index = 0;

	return 0;
}
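
/*
 * Worked example of the block math above (a sketch: the constants live in
 * vnic_rq.h, where VNIC_RQ_BUF_BLK_ENTRIES is typically 64). With
 * desc_count = 100, VNIC_RQ_BUF_BLKS_NEEDED(100) = 2: block 0 carries
 * bufs 0-63, block 1 carries bufs 64-99, and buf 99's ->next wraps back
 * to buf 0, closing the ring.
 */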
void vnic_rq_free(struct vnic_rq *rq)
{
	struct vnic_dev *vdev;
	unsigned int i;

	vdev = rq->vdev;

	vnic_dev_free_desc_ring(vdev, &rq->ring);

	for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
		kfree(rq->bufs[i]);
		rq->bufs[i] = NULL;
	}

	rq->ctrl = NULL;
}
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size)
{
	int err;

	rq->index = index;
	rq->vdev = vdev;

	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
	if (!rq->ctrl) {
		printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
		return -EINVAL;
	}

	/* Quiesce the queue before (re)allocating its descriptor ring */
	vnic_rq_disable(rq);

	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
	if (err)
		return err;

	err = vnic_rq_alloc_bufs(rq);
	if (err) {
		vnic_rq_free(rq);
		return err;
	}

	return 0;
}
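
/*
 * Illustrative bring-up order (a hedged sketch, not code from this file;
 * the real callers live in the enic/fnic drivers, and cq_index and
 * err_intr_offset below are placeholder parameters):
 *
 *	err = vnic_rq_alloc(vdev, &rq, 0, desc_count, desc_size);
 *	if (err)
 *		return err;
 *	vnic_rq_init(&rq, cq_index, 1, err_intr_offset);
 *	vnic_rq_enable(&rq);
 */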
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;
	u32 fetch_index;

	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &rq->ctrl->ring_base);
	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
	iowrite32(cq_index, &rq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
	iowrite32(0, &rq->ctrl->dropped_packet_count);
	iowrite32(0, &rq->ctrl->error_status);

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
		[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];

	/* With posted_index == fetch_index the hardware sees an empty
	 * ring until the caller posts receive buffers.
	 */
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	rq->buf_index = 0;
}
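
/*
 * After init the ring is empty, so the caller must post receive buffers
 * before traffic can land. A hedged sketch using the vnic_rq_desc_avail()
 * and vnic_rq_post() helpers from vnic_rq.h (buffer allocation and DMA
 * mapping are the caller's job and are elided here):
 *
 *	while (vnic_rq_desc_avail(&rq) > 0) {
 *		... allocate and DMA-map an os_buf ...
 *		vnic_rq_post(&rq, os_buf, 0, dma_addr, len);
 *	}
 */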
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
	return ioread32(&rq->ctrl->error_status);
}

void vnic_rq_enable(struct vnic_rq *rq)
{
	iowrite32(1, &rq->ctrl->enable);
}

int vnic_rq_disable(struct vnic_rq *rq)
{
	unsigned int wait;

	iowrite32(0, &rq->ctrl->enable);

	/* Wait for HW to ACK disable request */
	for (wait = 0; wait < 100; wait++) {
		if (!(ioread32(&rq->ctrl->running)))
			return 0;
		udelay(1);
	}

	printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);

	return -ETIMEDOUT;
}
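
/*
 * Teardown mirrors bring-up: disable first, then clean and free only once
 * the hardware has acknowledged the stop (a sketch of the expected order;
 * my_buf_clean is a hypothetical callback, sketched after vnic_rq_clean()
 * below):
 *
 *	err = vnic_rq_disable(&rq);
 *	if (!err) {
 *		vnic_rq_clean(&rq, my_buf_clean);
 *		vnic_rq_free(&rq);
 *	}
 */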
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
	struct vnic_rq_buf *buf;
	u32 fetch_index;

	WARN_ON(ioread32(&rq->ctrl->enable));

	buf = rq->to_clean;

	while (vnic_rq_desc_used(rq) > 0) {
		(*buf_clean)(rq, buf);

		buf = rq->to_clean = buf->next;
		rq->ring.desc_avail++;
	}

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
		[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	rq->buf_index = 0;

	vnic_dev_clear_desc_ring(&rq->ring);
}
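
/*
 * Example buf_clean callback for vnic_rq_clean() (a hedged sketch:
 * struct my_adapter and its pdev member are hypothetical; real callbacks
 * in the enic/fnic drivers likewise unmap the DMA buffer and free the
 * OS buffer):
 *
 *	static void my_buf_clean(struct vnic_rq *rq, struct vnic_rq_buf *buf)
 *	{
 *		struct my_adapter *adap = vnic_dev_priv(rq->vdev);
 *
 *		if (!buf->os_buf)
 *			return;
 *
 *		dma_unmap_single(&adap->pdev->dev, buf->dma_addr, buf->len,
 *			DMA_FROM_DEVICE);
 *		dev_kfree_skb_any(buf->os_buf);
 *		buf->os_buf = NULL;
 *	}
 */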