srq_cmd.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include "mlx5_ib.h"
#include "srq.h"
#include "qp.h"
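
/*
 * Size, in bytes, of the physical address (PAS) list needed to cover the
 * receive queue: queue bytes plus the page-offset contribution, rounded up
 * to whole device pages, one u64 per page.
 */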
static int get_pas_size(struct mlx5_srq_attr *in)
{
	u32 log_page_size = in->log_page_size + 12;
	u32 log_srq_size = in->log_size;
	u32 log_rq_stride = in->wqe_shift;
	u32 page_offset = in->page_offset;
	u32 po_quanta = 1 << (log_page_size - 6);
	u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
	u32 page_size = 1 << log_page_size;
	u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas = DIV_ROUND_UP(rq_sz_po, page_size);

	return rq_num_pas * sizeof(u64);
}
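
/*
 * set_wq()/set_srqc() translate a struct mlx5_srq_attr into the device's wq
 * and srqc context layouts; get_wq()/get_srqc() below perform the reverse
 * translation when querying.
 */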
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
	MLX5_SET(wq, wq, wq_signature,
		 !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
	MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
	MLX5_SET(wq, wq, log_wq_sz, in->log_size);
	MLX5_SET(wq, wq, page_offset, in->page_offset);
	MLX5_SET(wq, wq, lwm, in->lwm);
	MLX5_SET(wq, wq, pd, in->pd);
	MLX5_SET64(wq, wq, dbr_addr, in->db_record);
}

static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	MLX5_SET(srqc, srqc, wq_signature,
		 !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
	MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
	MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
	MLX5_SET(srqc, srqc, page_offset, in->page_offset);
	MLX5_SET(srqc, srqc, lwm, in->lwm);
	MLX5_SET(srqc, srqc, pd, in->pd);
	MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
	MLX5_SET(srqc, srqc, xrcd, in->xrcd);
	MLX5_SET(srqc, srqc, cqn, in->cqn);
}

static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(wq, wq, wq_signature))
		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
	in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
	in->log_size = MLX5_GET(wq, wq, log_wq_sz);
	in->page_offset = MLX5_GET(wq, wq, page_offset);
	in->lwm = MLX5_GET(wq, wq, lwm);
	in->pd = MLX5_GET(wq, wq, pd);
	in->db_record = MLX5_GET64(wq, wq, dbr_addr);
}

static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(srqc, srqc, wq_signature))
		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
	in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
	in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
	in->page_offset = MLX5_GET(srqc, srqc, page_offset);
	in->lwm = MLX5_GET(srqc, srqc, lwm);
	in->pd = MLX5_GET(srqc, srqc, pd);
	in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
}
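
/*
 * Look up an SRQ by number and take a reference on it; the reference is
 * dropped with mlx5_core_res_put() (the matching put used elsewhere in this
 * file) once the caller is done with the SRQ.
 */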
struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	struct mlx5_core_srq *srq;

	xa_lock_irq(&table->array);
	srq = xa_load(&table->array, srqn);
	if (srq)
		refcount_inc(&srq->common.refcount);
	xa_unlock_irq(&table->array);

	return srq;
}
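
/*
 * For a user-memory backed SRQ, pick the best page size for the umem and
 * record the resulting log_page_size/page_offset in the attributes.  The
 * WARN_ON cross-checks that the PAS size derived from the attributes matches
 * the number of DMA blocks in the umem.
 */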
static int __set_srq_page_size(struct mlx5_srq_attr *in,
			       unsigned long page_size)
{
	if (!page_size)
		return -EINVAL;
	in->log_page_size = order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT;

	if (WARN_ON(get_pas_size(in) !=
		    ib_umem_num_dma_blocks(in->umem, page_size) * sizeof(u64)))
		return -EINVAL;
	return 0;
}

#define set_srq_page_size(in, typ, log_pgsz_fld)                             \
	__set_srq_page_size(in, mlx5_umem_find_best_quantized_pgoff(         \
					(in)->umem, typ, log_pgsz_fld,        \
					MLX5_ADAPTER_PAGE_SHIFT, page_offset, \
					64, &(in)->page_offset))
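
/*
 * Build and execute CREATE_SRQ: the PAS list is either populated from the
 * user umem or copied from the caller-provided array, and the returned srqn
 * and the uid are stored in the mlx5_core_srq on success.
 */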
static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
	void *create_in;
	void *srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	if (in->umem) {
		err = set_srq_page_size(in, srqc, log_page_size);
		if (err)
			return err;
	}

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	MLX5_SET(create_srq_in, create_in, uid, in->uid);
	srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
	pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

	set_srqc(srqc, in);
	if (in->umem)
		mlx5_ib_populate_pas(
			in->umem,
			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
			pas, 0);
	else
		memcpy(pas, in->pas, pas_size);

	MLX5_SET(create_srq_in, create_in, opcode,
		 MLX5_CMD_OP_CREATE_SRQ);

	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err) {
		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
		srq->uid = in->uid;
	}

	return err;
}

static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};

	MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
	MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
	MLX5_SET(destroy_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
}

static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm, int is_srq)
{
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};

	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, in, lwm, lwm);
	MLX5_SET(arm_rq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}

static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
	u32 *srq_out;
	void *srqc;
	int err;

	srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
	if (!srq_out)
		return -ENOMEM;

	MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
	MLX5_SET(query_srq_in, in, srqn, srq->srqn);
	err = mlx5_cmd_exec_inout(dev->mdev, query_srq, in, srq_out);
	if (err)
		goto out;

	srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
	get_srqc(srqc, out);
	if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(srq_out);
	return err;
}
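
/*
 * XRC SRQ flavour of create: same flow as create_srq_cmd(), but using the
 * xrc_srq_context_entry layout and the CREATE_XRC_SRQ opcode.
 */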
static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			      struct mlx5_core_srq *srq,
			      struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
	void *create_in;
	void *xrc_srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	if (in->umem) {
		err = set_srq_page_size(in, xrc_srqc, log_page_size);
		if (err)
			return err;
	}

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
				xrc_srq_context_entry);
	pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

	set_srqc(xrc_srqc, in);
	MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
	if (in->umem)
		mlx5_ib_populate_pas(
			in->umem,
			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
			pas, 0);
	else
		memcpy(pas, in->pas, pas_size);

	MLX5_SET(create_xrc_srq_in, create_in, opcode,
		 MLX5_CMD_OP_CREATE_XRC_SRQ);

	memset(create_out, 0, sizeof(create_out));
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	if (err)
		goto out;

	srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
	srq->uid = in->uid;

out:
	kvfree(create_in);
	return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			       struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {};

	MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
	MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, srq->srqn);
	MLX5_SET(destroy_xrc_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, destroy_xrc_srq, in);
}

static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			   u16 lwm)
{
	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {};

	MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, in, op_mod,
		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, srq->srqn);
	MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
	MLX5_SET(arm_xrc_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, arm_xrc_srq, in);
}

static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			     struct mlx5_core_srq *srq,
			     struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {};
	u32 *xrcsrq_out;
	void *xrc_srqc;
	int err;

	xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
	if (!xrcsrq_out)
		return -ENOMEM;

	MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
	MLX5_SET(query_xrc_srq_in, in, xrc_srqn, srq->srqn);

	err = mlx5_cmd_exec_inout(dev->mdev, query_xrc_srq, in, xrcsrq_out);
	if (err)
		goto out;

	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
				xrc_srq_context_entry);
	get_srqc(xrc_srqc, out);
	if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(xrcsrq_out);
	return err;
}
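
/*
 * RMP (receive memory pool) flavour, used for plain SRQs once ISSI is
 * non-zero: the wq context is embedded in the rmpc and the RMP is created
 * directly in the ready state.
 */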
static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	void *create_out = NULL;
	void *create_in = NULL;
	void *rmpc;
	void *wq;
	void *pas;
	int pas_size;
	int outlen;
	int inlen;
	int err;

	if (in->umem) {
		err = set_srq_page_size(in, wq, log_wq_pg_sz);
		if (err)
			return err;
	}

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
	outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
	create_in = kvzalloc(inlen, GFP_KERNEL);
	create_out = kvzalloc(outlen, GFP_KERNEL);
	if (!create_in || !create_out) {
		err = -ENOMEM;
		goto out;
	}

	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(create_rmp_in, create_in, uid, in->uid);
	pas = MLX5_ADDR_OF(rmpc, rmpc, wq.pas);

	set_wq(wq, in);
	if (in->umem)
		mlx5_ib_populate_pas(
			in->umem,
			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
			pas, 0);
	else
		memcpy(pas, in->pas, pas_size);

	MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
	if (!err) {
		srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
		srq->uid = in->uid;
	}

out:
	kvfree(create_in);
	kvfree(create_out);
	return err;
}

static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};

	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
	MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
	MLX5_SET(destroy_rmp_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, destroy_rmp, in);
}
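
/*
 * Arming an RMP is a MODIFY_RMP that only changes the limit water mark:
 * lwm is written into the wq context and flagged in the modify bitmask.
 */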
static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm)
{
	void *out = NULL;
	void *in = NULL;
	void *rmpc;
	void *wq;
	void *bitmask;
	int outlen;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
	outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);

	in = kvzalloc(inlen, GFP_KERNEL);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
	bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
	MLX5_SET(modify_rmp_in, in, uid, srq->uid);
	MLX5_SET(wq, wq, lwm, lwm);
	MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);

	err = mlx5_cmd_exec_inout(dev->mdev, modify_rmp, in, out);

out:
	kvfree(in);
	kvfree(out);
	return err;
}

static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 *rmp_out = NULL;
	u32 *rmp_in = NULL;
	void *rmpc;
	int outlen;
	int inlen;
	int err;

	outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
	inlen = MLX5_ST_SZ_BYTES(query_rmp_in);

	rmp_out = kvzalloc(outlen, GFP_KERNEL);
	rmp_in = kvzalloc(inlen, GFP_KERNEL);
	if (!rmp_out || !rmp_in) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
	MLX5_SET(query_rmp_in, rmp_in, rmpn, srq->srqn);
	err = mlx5_cmd_exec_inout(dev->mdev, query_rmp, rmp_in, rmp_out);
	if (err)
		goto out;

	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
	get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
	if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(rmp_out);
	kvfree(rmp_in);
	return err;
}
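
/*
 * XRQ flavour, used for tag-matching (IB_SRQT_TM) SRQs: besides the common
 * wq setup it programs the tag-matching topology and, when requested, the
 * rendezvous offload.
 */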
static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
	void *create_in;
	void *xrqc;
	void *wq;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	if (in->umem) {
		err = set_srq_page_size(in, wq, log_wq_pg_sz);
		if (err)
			return err;
	}

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
	wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
	pas = MLX5_ADDR_OF(xrqc, xrqc, wq.pas);

	set_wq(wq, in);
	if (in->umem)
		mlx5_ib_populate_pas(
			in->umem,
			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
			pas, 0);
	else
		memcpy(pas, in->pas, pas_size);

	if (in->type == IB_SRQT_TM) {
		MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
		if (in->flags & MLX5_SRQ_FLAG_RNDV)
			MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
		MLX5_SET(xrqc, xrqc,
			 tag_matching_topology_context.log_matching_list_sz,
			 in->tm_log_list_size);
	}
	MLX5_SET(xrqc, xrqc, user_index, in->user_index);
	MLX5_SET(xrqc, xrqc, cqn, in->cqn);
	MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
	MLX5_SET(create_xrq_in, create_in, uid, in->uid);
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err) {
		srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
		srq->uid = in->uid;
	}

	return err;
}

static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {};

	MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
	MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
	MLX5_SET(destroy_xrq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, destroy_xrq, in);
}

static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
		       struct mlx5_core_srq *srq,
		       u16 lwm)
{
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};

	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, in, lwm, lwm);
	MLX5_SET(arm_rq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}

static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {};
	u32 *xrq_out;
	int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
	void *xrqc;
	int err;

	xrq_out = kvzalloc(outlen, GFP_KERNEL);
	if (!xrq_out)
		return -ENOMEM;

	MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
	MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);

	err = mlx5_cmd_exec_inout(dev->mdev, query_xrq, in, xrq_out);
	if (err)
		goto out;

	xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
	get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
	if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;

	out->tm_next_tag =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.append_next_index);
	out->tm_hw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.hw_phase_cnt);
	out->tm_sw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.sw_phase_cnt);

out:
	kvfree(xrq_out);
	return err;
}
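
/*
 * The *_split() helpers pick the firmware object behind the SRQ: the legacy
 * SRQ commands when ISSI is 0, otherwise XRC SRQ, XRQ or RMP commands
 * according to the resource type chosen at create time.
 */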
static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			    struct mlx5_srq_attr *in)
{
	if (!dev->mdev->issi)
		return create_srq_cmd(dev, srq, in);

	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return create_xrc_srq_cmd(dev, srq, in);
	case MLX5_RES_XRQ:
		return create_xrq_cmd(dev, srq, in);
	default:
		return create_rmp_cmd(dev, srq, in);
	}
}

static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	if (!dev->mdev->issi)
		return destroy_srq_cmd(dev, srq);

	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return destroy_xrc_srq_cmd(dev, srq);
	case MLX5_RES_XRQ:
		return destroy_xrq_cmd(dev, srq);
	default:
		return destroy_rmp_cmd(dev, srq);
	}
}
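
/*
 * Create an SRQ of the requested type, take the initial reference and insert
 * it into the SRQ table; if the xarray store fails, the hardware object is
 * destroyed again.
 */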
int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *in)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	int err;

	switch (in->type) {
	case IB_SRQT_XRC:
		srq->common.res = MLX5_RES_XSRQ;
		break;
	case IB_SRQT_TM:
		srq->common.res = MLX5_RES_XRQ;
		break;
	default:
		srq->common.res = MLX5_RES_SRQ;
	}

	err = create_srq_split(dev, srq, in);
	if (err)
		return err;

	refcount_set(&srq->common.refcount, 1);
	init_completion(&srq->common.free);

	err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
	if (err)
		goto err_destroy_srq_split;

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);

	return err;
}
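
/*
 * Destroy an SRQ: hide it from lookups first (keeping the index reserved so
 * a failed firmware destroy can be rolled back), then destroy the hardware
 * object, drop the initial reference and wait for all users to go away.
 */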
int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	/* Delete entry, but leave index occupied */
	tmp = xa_cmpxchg_irq(&table->array, srq->srqn, srq, XA_ZERO_ENTRY, 0);
	if (WARN_ON(tmp != srq))
		return xa_err(tmp) ?: -EINVAL;

	err = destroy_srq_split(dev, srq);
	if (err) {
		/*
		 * We don't need to check returned result for an error,
		 * because we are storing in pre-allocated space xarray
		 * entry and it can't fail at this stage.
		 */
		xa_cmpxchg_irq(&table->array, srq->srqn, XA_ZERO_ENTRY, srq, 0);
		return err;
	}
	xa_erase_irq(&table->array, srq->srqn);

	mlx5_core_res_put(&srq->common);
	wait_for_completion(&srq->common.free);
	return 0;
}

int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       struct mlx5_srq_attr *out)
{
	if (!dev->mdev->issi)
		return query_srq_cmd(dev, srq, out);

	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return query_xrc_srq_cmd(dev, srq, out);
	case MLX5_RES_XRQ:
		return query_xrq_cmd(dev, srq, out);
	default:
		return query_rmp_cmd(dev, srq, out);
	}
}

int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		     u16 lwm, int is_srq)
{
	if (!dev->mdev->issi)
		return arm_srq_cmd(dev, srq, lwm, is_srq);

	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return arm_xrc_srq_cmd(dev, srq, lwm);
	case MLX5_RES_XRQ:
		return arm_xrq_cmd(dev, srq, lwm);
	default:
		return arm_rmp_cmd(dev, srq, lwm);
	}
}
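
/*
 * EQ notifier: for SRQ catastrophic-error and RQ-limit events, look up the
 * SRQ from the event's srqn, take a temporary reference and forward the
 * event to the owner's srq->event() callback.
 */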
static int srq_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_srq_table *table;
	struct mlx5_core_srq *srq;
	struct mlx5_eqe *eqe;
	u32 srqn;

	if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
	    type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
		return NOTIFY_DONE;

	table = container_of(nb, struct mlx5_srq_table, nb);

	eqe = data;
	srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;

	xa_lock(&table->array);
	srq = xa_load(&table->array, srqn);
	if (srq)
		refcount_inc(&srq->common.refcount);
	xa_unlock(&table->array);

	if (!srq)
		return NOTIFY_OK;

	srq->event(srq, eqe->type);

	mlx5_core_res_put(&srq->common);

	return NOTIFY_OK;
}
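
/*
 * Table setup/teardown: the xarray is created with an IRQ-safe lock and the
 * SRQ event notifier is registered with / unregistered from the core device.
 */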
int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_srq_table *table = &dev->srq_table;

	memset(table, 0, sizeof(*table));
	xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);

	table->nb.notifier_call = srq_event_notifier;
	mlx5_notifier_register(dev->mdev, &table->nb);

	return 0;
}

void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_srq_table *table = &dev->srq_table;

	mlx5_notifier_unregister(dev->mdev, &table->nb);
}