vnic_wq_copy.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */
#ifndef _VNIC_WQ_COPY_H_
#define _VNIC_WQ_COPY_H_

#include <linux/pci.h>

#include "vnic_wq.h"
#include "fcpio.h"

#define VNIC_WQ_COPY_MAX	1

struct vnic_wq_copy {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	unsigned to_use_index;
	unsigned to_clean_index;
};

static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
{
	return wq->ring.desc_avail;
}

/* One slot is always left unused so that a full ring remains
 * distinguishable from an empty one; hence the "- 1".
 */
static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
{
	return wq->ring.desc_count - 1 - wq->ring.desc_avail;
}

static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
{
	struct fcpio_host_req *desc = wq->ring.descs;

	return &desc[wq->to_use_index];
}

static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
{
	((wq->to_use_index + 1) == wq->ring.desc_count) ?
		(wq->to_use_index = 0) : (wq->to_use_index++);
	wq->ring.desc_avail--;

	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();

	iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
}
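
/* Illustrative sketch: how a caller might reserve, fill, and post a single
 * descriptor. example_wq_copy_send() is hypothetical, and the memset stands
 * in for real fcpio_host_req initialization, which is driver-specific.
 * The caller is assumed to hold whatever lock serializes access to the WQ.
 */
static inline int example_wq_copy_send(struct vnic_wq_copy *wq)
{
	struct fcpio_host_req *desc;

	if (!vnic_wq_copy_desc_avail(wq))
		return -ENOMEM;			/* ring full; retry later */

	desc = vnic_wq_copy_next_desc(wq);
	memset(desc, 0, sizeof(*desc));		/* build the real request here */

	vnic_wq_copy_post(wq);			/* wmb() + doorbell write */
	return 0;
}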

static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
{
	unsigned int cnt;

	if (wq->to_clean_index <= index)
		cnt = (index - wq->to_clean_index) + 1;
	else
		cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;

	wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
	wq->ring.desc_avail += cnt;
}
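
/* Worked example of the wrap-around arithmetic above: with desc_count = 8,
 * to_clean_index = 6 and a completed index of 1, the hardware has finished
 * entries 6, 7, 0 and 1, so cnt = 8 - 6 + 1 + 1 = 4 descriptors are
 * reclaimed and to_clean_index advances to (1 + 1) % 8 = 2.
 */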

static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
	u16 completed_index,
	void (*q_service)(struct vnic_wq_copy *wq,
	struct fcpio_host_req *wq_desc))
{
	struct fcpio_host_req *wq_desc = wq->ring.descs;
	unsigned int curr_index;

	while (1) {

		if (q_service)
			(*q_service)(wq, &wq_desc[wq->to_clean_index]);

		wq->ring.desc_avail++;

		curr_index = wq->to_clean_index;

		/* increment the to-clean index so that we start
		 * with an unprocessed index next time we enter the loop
		 */
		((wq->to_clean_index + 1) == wq->ring.desc_count) ?
			(wq->to_clean_index = 0) : (wq->to_clean_index++);

		if (curr_index == completed_index)
			break;

		/* we have cleaned all the entries */
		if ((completed_index == (u16)-1) &&
		    (wq->to_clean_index == wq->to_use_index))
			break;
	}
}
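
/* Illustrative sketch: a completion path built on vnic_wq_copy_service().
 * Both functions are hypothetical; a real handler would unmap DMA buffers
 * and complete the request tracked by wq_desc. Passing (u16)-1 as
 * completed_index instead drains every posted entry.
 */
static void example_wq_copy_complete(struct vnic_wq_copy *wq,
				     struct fcpio_host_req *wq_desc)
{
	/* release per-request resources associated with wq_desc */
}

static inline void example_wq_copy_reap(struct vnic_wq_copy *wq,
					u16 completed_index)
{
	vnic_wq_copy_service(wq, completed_index, example_wq_copy_complete);
}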

void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
void vnic_wq_copy_free(struct vnic_wq_copy *wq);
int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
		       unsigned int index, unsigned int desc_count,
		       unsigned int desc_size);
void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
		       unsigned int error_interrupt_enable,
		       unsigned int error_interrupt_offset);
void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
			void (*q_clean)(struct vnic_wq_copy *wq,
					struct fcpio_host_req *wq_desc));

#endif /* _VNIC_WQ_COPY_H_ */