/* rxe_srq.c */
  1. // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2. /*
  3. * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
  4. * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
  5. */
  6. #include <linux/vmalloc.h>
  7. #include "rxe.h"
  8. #include "rxe_queue.h"
  9. int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
  10. {
  11. struct ib_srq_attr *attr = &init->attr;
  12. if (attr->max_wr > rxe->attr.max_srq_wr) {
  13. pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
  14. attr->max_wr, rxe->attr.max_srq_wr);
  15. goto err1;
  16. }
  17. if (attr->max_wr <= 0) {
  18. pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
  19. goto err1;
  20. }
  21. if (attr->max_wr < RXE_MIN_SRQ_WR)
  22. attr->max_wr = RXE_MIN_SRQ_WR;
  23. if (attr->max_sge > rxe->attr.max_srq_sge) {
  24. pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
  25. attr->max_sge, rxe->attr.max_srq_sge);
  26. goto err1;
  27. }
  28. if (attr->max_sge < RXE_MIN_SRQ_SGE)
  29. attr->max_sge = RXE_MIN_SRQ_SGE;
  30. return 0;
  31. err1:
  32. return -EINVAL;
  33. }
  34. int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
  35. struct ib_srq_init_attr *init, struct ib_udata *udata,
  36. struct rxe_create_srq_resp __user *uresp)
  37. {
  38. int err;
  39. int srq_wqe_size;
  40. struct rxe_queue *q;
  41. enum queue_type type;
  42. srq->ibsrq.event_handler = init->event_handler;
  43. srq->ibsrq.srq_context = init->srq_context;
  44. srq->limit = init->attr.srq_limit;
  45. srq->srq_num = srq->elem.index;
  46. srq->rq.max_wr = init->attr.max_wr;
  47. srq->rq.max_sge = init->attr.max_sge;
  48. srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
  49. spin_lock_init(&srq->rq.producer_lock);
  50. spin_lock_init(&srq->rq.consumer_lock);
  51. type = QUEUE_TYPE_FROM_CLIENT;
  52. q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size, type);
  53. if (!q) {
  54. pr_warn("unable to allocate queue for srq\n");
  55. return -ENOMEM;
  56. }
  57. srq->rq.queue = q;
  58. err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
  59. q->buf_size, &q->ip);
  60. if (err) {
  61. vfree(q->buf);
  62. kfree(q);
  63. return err;
  64. }
  65. if (uresp) {
  66. if (copy_to_user(&uresp->srq_num, &srq->srq_num,
  67. sizeof(uresp->srq_num))) {
  68. rxe_queue_cleanup(q);
  69. return -EFAULT;
  70. }
  71. }
  72. return 0;
  73. }
  74. int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
  75. struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
  76. {
  77. if (srq->error) {
  78. pr_warn("srq in error state\n");
  79. goto err1;
  80. }
  81. if (mask & IB_SRQ_MAX_WR) {
  82. if (attr->max_wr > rxe->attr.max_srq_wr) {
  83. pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
  84. attr->max_wr, rxe->attr.max_srq_wr);
  85. goto err1;
  86. }
  87. if (attr->max_wr <= 0) {
  88. pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
  89. goto err1;
  90. }
  91. if (srq->limit && (attr->max_wr < srq->limit)) {
  92. pr_warn("max_wr (%d) < srq->limit (%d)\n",
  93. attr->max_wr, srq->limit);
  94. goto err1;
  95. }
  96. if (attr->max_wr < RXE_MIN_SRQ_WR)
  97. attr->max_wr = RXE_MIN_SRQ_WR;
  98. }
  99. if (mask & IB_SRQ_LIMIT) {
  100. if (attr->srq_limit > rxe->attr.max_srq_wr) {
  101. pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
  102. attr->srq_limit, rxe->attr.max_srq_wr);
  103. goto err1;
  104. }
  105. if (attr->srq_limit > srq->rq.queue->buf->index_mask) {
  106. pr_warn("srq_limit (%d) > cur limit(%d)\n",
  107. attr->srq_limit,
  108. srq->rq.queue->buf->index_mask);
  109. goto err1;
  110. }
  111. }
  112. return 0;
  113. err1:
  114. return -EINVAL;
  115. }
  116. int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
  117. struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
  118. struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
  119. {
  120. int err;
  121. struct rxe_queue *q = srq->rq.queue;
  122. struct mminfo __user *mi = NULL;
  123. if (mask & IB_SRQ_MAX_WR) {
  124. /*
  125. * This is completely screwed up, the response is supposed to
  126. * be in the outbuf not like this.
  127. */
  128. mi = u64_to_user_ptr(ucmd->mmap_info_addr);
  129. err = rxe_queue_resize(q, &attr->max_wr,
  130. rcv_wqe_size(srq->rq.max_sge), udata, mi,
  131. &srq->rq.producer_lock,
  132. &srq->rq.consumer_lock);
  133. if (err)
  134. goto err2;
  135. }
  136. if (mask & IB_SRQ_LIMIT)
  137. srq->limit = attr->srq_limit;
  138. return 0;
  139. err2:
  140. rxe_queue_cleanup(q);
  141. srq->rq.queue = NULL;
  142. return err;
  143. }
  144. void rxe_srq_cleanup(struct rxe_pool_elem *elem)
  145. {
  146. struct rxe_srq *srq = container_of(elem, typeof(*srq), elem);
  147. if (srq->pd)
  148. rxe_put(srq->pd);
  149. if (srq->rq.queue)
  150. rxe_queue_cleanup(srq->rq.queue);
  151. }