rxe_cq.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/vmalloc.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
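
/*
 * Check the CQ attributes requested by a create or resize operation.
 * Fail if the requested depth is outside the device limits or, when @cq
 * is non-NULL (resize), smaller than the number of completions already
 * queued. @comp_vector is accepted but not validated here.
 */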
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector)
{
	int count;

	if (cqe <= 0) {
		pr_warn("cqe(%d) <= 0\n", cqe);
		goto err1;
	}

	if (cqe > rxe->attr.max_cqe) {
		pr_debug("cqe(%d) > max_cqe(%d)\n",
			 cqe, rxe->attr.max_cqe);
		goto err1;
	}

	if (cq) {
		count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
		if (cqe < count) {
			pr_debug("cqe(%d) < current # elements in queue (%d)\n",
				 cqe, count);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}
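
/*
 * Tasklet handler that delivers the completion upcall to the consumer.
 * The upcall is skipped once the CQ has been marked dying by
 * rxe_cq_disable().
 */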
static void rxe_send_complete(struct tasklet_struct *t)
{
	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq->is_dying) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
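
/*
 * Initialize a newly allocated CQ: create the completion queue, share the
 * queue buffer with userspace via the mmap info when @uresp is supplied,
 * and set up the completion tasklet and lock.
 */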
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp)
{
	int err;
	enum queue_type type;

	type = QUEUE_TYPE_TO_CLIENT;
	cq->queue = rxe_queue_init(rxe, &cqe,
				   sizeof(struct rxe_cqe), type);
	if (!cq->queue) {
		pr_warn("unable to create cq\n");
		return -ENOMEM;
	}

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
	if (err) {
		vfree(cq->queue->buf);
		kfree(cq->queue);
		return err;
	}

	cq->is_user = uresp;
	cq->is_dying = false;

	tasklet_setup(&cq->comp_task, rxe_send_complete);

	spin_lock_init(&cq->cq_lock);
	cq->ibcq.cqe = cqe;
	return 0;
}
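
/*
 * Resize the completion queue; on success record the new depth in
 * ibcq.cqe so it is reported back to the consumer.
 */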
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata)
{
	int err;

	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
			       sizeof(struct rxe_cqe), udata,
			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
	if (!err)
		cq->ibcq.cqe = cqe;

	return err;
}
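
/*
 * Post one completion to the CQ. If the queue is full, generate an
 * IB_EVENT_CQ_ERR event (when an event handler is registered) and return
 * -EBUSY. Otherwise copy the CQE into the producer slot, advance the
 * producer index and, if the CQ was armed for this kind of completion,
 * schedule the completion tasklet.
 */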
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
	struct ib_event ev;
	int full;
	void *addr;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);

	full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
	if (unlikely(full)) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		if (cq->ibcq.event_handler) {
			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}

		return -EBUSY;
	}

	addr = queue_producer_addr(cq->queue, QUEUE_TYPE_TO_CLIENT);
	memcpy(addr, cqe, sizeof(*cqe));

	queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	if ((cq->notify == IB_CQ_NEXT_COMP) ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = 0;
		tasklet_schedule(&cq->comp_task);
	}

	return 0;
}
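
/*
 * Mark the CQ as dying so rxe_send_complete() no longer calls back into
 * the consumer; called before the CQ is destroyed.
 */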
void rxe_cq_disable(struct rxe_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->is_dying = true;
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}
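
/*
 * Pool element cleanup: release the completion queue buffer, if any, when
 * the CQ object is freed.
 */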
void rxe_cq_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);

	if (cq->queue)
		rxe_queue_cleanup(cq->queue);
}