cmd.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2017-2020, Mellanox Technologies inc. All rights reserved.
 */

#include "cmd.h"

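/* Query the device's special contexts and return the dump-fill mkey. */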
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
	if (!err)
		*mkey = MLX5_GET(query_special_contexts_out, out,
				 dump_fill_mkey);
	return err;
}

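/* Query the device's special contexts and return the null mkey. */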
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
	if (!err)
		*null_mkey = MLX5_GET(query_special_contexts_out, out,
				      null_mkey);
	return err;
}

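/*
 * Query congestion control parameters into @out; @cong_point selects the
 * congestion protocol being queried.
 */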
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
			       void *out)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = {};

	MLX5_SET(query_cong_params_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_PARAMS);
	MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);

	return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
}

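/*
 * Allocate @length bytes of device memory (MEMIC) with the requested
 * alignment.  Free pages are tracked in dm->memic_alloc_pages; if the
 * firmware rejects a candidate range with -EAGAIN, the search continues
 * from the next page.  On success, *addr holds the address of the
 * allocation within the device BAR.
 */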
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
			 u64 length, u32 alignment)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
					>> PAGE_SHIFT;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
	u32 mlx5_alignment;
	u64 page_idx = 0;
	int ret = 0;

	if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
		return -EINVAL;

	/* mlx5 device sets alignment as 64*2^driver_value
	 * so normalizing is needed.
	 */
	mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
			 alignment - MLX5_MEMIC_BASE_ALIGN;
	if (mlx5_alignment > max_alignment)
		return -EINVAL;

	MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
	MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
	MLX5_SET(alloc_memic_in, in, memic_size, length);
	MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
		 mlx5_alignment);

	while (page_idx < num_memic_hw_pages) {
		spin_lock(&dm->lock);
		page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
						      num_memic_hw_pages,
						      page_idx,
						      num_pages, 0);

		if (page_idx < num_memic_hw_pages)
			bitmap_set(dm->memic_alloc_pages,
				   page_idx, num_pages);

		spin_unlock(&dm->lock);

		if (page_idx >= num_memic_hw_pages)
			break;

		MLX5_SET64(alloc_memic_in, in, range_start_addr,
			   hw_start_addr + (page_idx * PAGE_SIZE));

		ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
		if (ret) {
			spin_lock(&dm->lock);
			bitmap_clear(dm->memic_alloc_pages,
				     page_idx, num_pages);
			spin_unlock(&dm->lock);

			if (ret == -EAGAIN) {
				page_idx++;
				continue;
			}

			return ret;
		}

		*addr = dev->bar_addr +
			MLX5_GET64(alloc_memic_out, out, memic_start_addr);

		return 0;
	}

	return -ENOMEM;
}

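/*
 * Release a MEMIC allocation.  The allocation bitmap is only cleared
 * once the firmware command has succeeded.
 */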
void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
	u64 start_page_idx;
	int err;

	addr -= dev->bar_addr;
	start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;

	MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
	MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
	MLX5_SET(dealloc_memic_in, in, memic_size, length);

	err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
	if (err)
		return;

	spin_lock(&dm->lock);
	bitmap_clear(dm->memic_alloc_pages,
		     start_page_idx, num_pages);
	spin_unlock(&dm->lock);
}

void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};

	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);
	MLX5_SET(destroy_tir_in, in, uid, uid);
	mlx5_cmd_exec_in(dev, destroy_tir, in);
}

void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);
	MLX5_SET(destroy_tis_in, in, uid, uid);
	mlx5_cmd_exec_in(dev, destroy_tis, in);
}

int mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};

	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
	MLX5_SET(destroy_rqt_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev, destroy_rqt, in);
}

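/* Allocate a transport domain on behalf of @uid and return it in *tdn. */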
int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
				    u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
	int err;

	MLX5_SET(alloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(alloc_transport_domain_in, in, uid, uid);

	err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
	if (!err)
		*tdn = MLX5_GET(alloc_transport_domain_out, out,
				transport_domain);

	return err;
}

void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
				       u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};

	MLX5_SET(dealloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(dealloc_transport_domain_in, in, uid, uid);
	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
	mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
}

int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};

	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);
	MLX5_SET(dealloc_pd_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev, dealloc_pd, in);
}

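/* Attach @qpn to the multicast group identified by @mgid. */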
int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
			u32 qpn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
	void *gid;

	MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
	MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
	MLX5_SET(attach_to_mcg_in, in, uid, uid);
	gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
	memcpy(gid, mgid, sizeof(*mgid));
	return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
}

int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
			u32 qpn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
	void *gid;

	MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
	MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
	MLX5_SET(detach_from_mcg_in, in, uid, uid);
	gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
	memcpy(gid, mgid, sizeof(*mgid));
	return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
}

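/* Allocate an XRC domain on behalf of @uid and return it in *xrcdn. */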
int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	MLX5_SET(alloc_xrcd_in, in, uid, uid);
	err = mlx5_cmd_exec_inout(dev, alloc_xrcd, in, out);
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}

int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	MLX5_SET(dealloc_xrcd_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev, dealloc_xrcd, in);
}

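/*
 * Issue a MAD_IFC command: the request MAD is copied into a heap-allocated
 * command buffer and the response MAD is copied back into @outb.
 */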
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		     u16 opmod, u8 port)
{
	int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
	int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
	int err = -ENOMEM;
	void *data;
	void *resp;
	u32 *out;
	u32 *in;

	in = kzalloc(inlen, GFP_KERNEL);
	out = kzalloc(outlen, GFP_KERNEL);
	if (!in || !out)
		goto out;

	MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
	MLX5_SET(mad_ifc_in, in, op_mod, opmod);
	MLX5_SET(mad_ifc_in, in, port, port);

	data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
	memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));

	err = mlx5_cmd_exec_inout(dev, mad_ifc, in, out);
	if (err)
		goto out;

	resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
	memcpy(outb, resp,
	       MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));

out:
	kfree(out);
	kfree(in);
	return err;
}