/*
 * NOTE(review): removed web-extraction residue that preceded the header
 * content (a "mlx5_ib.h 45 KB" size banner and several lines of
 * concatenated line numbers produced by the scraper).
 */
  1. /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
  2. /*
  3. * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
  4. * Copyright (c) 2020, Intel Corporation. All rights reserved.
  5. */
  6. #ifndef MLX5_IB_H
  7. #define MLX5_IB_H
  8. #include <linux/kernel.h>
  9. #include <linux/sched.h>
  10. #include <rdma/ib_verbs.h>
  11. #include <rdma/ib_umem.h>
  12. #include <rdma/ib_smi.h>
  13. #include <linux/mlx5/driver.h>
  14. #include <linux/mlx5/cq.h>
  15. #include <linux/mlx5/fs.h>
  16. #include <linux/mlx5/qp.h>
  17. #include <linux/types.h>
  18. #include <linux/mlx5/transobj.h>
  19. #include <rdma/ib_user_verbs.h>
  20. #include <rdma/mlx5-abi.h>
  21. #include <rdma/uverbs_ioctl.h>
  22. #include <rdma/mlx5_user_ioctl_cmds.h>
  23. #include <rdma/mlx5_user_ioctl_verbs.h>
  24. #include "srq.h"
  25. #define mlx5_ib_dbg(_dev, format, arg...) \
  26. dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
  27. __LINE__, current->pid, ##arg)
  28. #define mlx5_ib_err(_dev, format, arg...) \
  29. dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
  30. __LINE__, current->pid, ##arg)
  31. #define mlx5_ib_warn(_dev, format, arg...) \
  32. dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
  33. __LINE__, current->pid, ##arg)
  34. #define MLX5_IB_DEFAULT_UIDX 0xffffff
  35. #define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
  36. static __always_inline unsigned long
  37. __mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
  38. unsigned int pgsz_shift)
  39. {
  40. unsigned int largest_pg_shift =
  41. min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift,
  42. BITS_PER_LONG - 1);
  43. /*
  44. * Despite a command allowing it, the device does not support lower than
  45. * 4k page size.
  46. */
  47. pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift);
  48. return GENMASK(largest_pg_shift, pgsz_shift);
  49. }
  50. /*
  51. * For mkc users, instead of a page_offset the command has a start_iova which
  52. * specifies both the page_offset and the on-the-wire IOVA
  53. */
  54. #define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova) \
  55. ib_umem_find_best_pgsz(umem, \
  56. __mlx5_log_page_size_to_bitmap( \
  57. __mlx5_bit_sz(typ, log_pgsz_fld), \
  58. pgsz_shift), \
  59. iova)
  60. static __always_inline unsigned long
  61. __mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
  62. unsigned int offset_shift)
  63. {
  64. unsigned int largest_offset_shift =
  65. min_t(unsigned long, page_offset_bits - 1 + offset_shift,
  66. BITS_PER_LONG - 1);
  67. return GENMASK(largest_offset_shift, offset_shift);
  68. }
/*
 * QP/CQ/WQ/etc type commands take a page offset that satisfies:
 * page_offset_quantized * (page_size/scale) = page_offset
 * Which restricts allowed page sizes to ones that satisfy the above.
 */
  74. unsigned long __mlx5_umem_find_best_quantized_pgoff(
  75. struct ib_umem *umem, unsigned long pgsz_bitmap,
  76. unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
  77. unsigned int *page_offset_quantized);
  78. #define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld, \
  79. pgsz_shift, page_offset_fld, \
  80. scale, page_offset_quantized) \
  81. __mlx5_umem_find_best_quantized_pgoff( \
  82. umem, \
  83. __mlx5_log_page_size_to_bitmap( \
  84. __mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift), \
  85. __mlx5_bit_sz(typ, page_offset_fld), \
  86. GENMASK(31, order_base_2(scale)), scale, \
  87. page_offset_quantized)
  88. #define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld, \
  89. pgsz_shift, page_offset_fld, \
  90. scale, page_offset_quantized) \
  91. __mlx5_umem_find_best_quantized_pgoff( \
  92. umem, \
  93. __mlx5_log_page_size_to_bitmap( \
  94. __mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift), \
  95. __mlx5_bit_sz(typ, page_offset_fld), 0, scale, \
  96. page_offset_quantized)
  97. enum {
  98. MLX5_IB_MMAP_OFFSET_START = 9,
  99. MLX5_IB_MMAP_OFFSET_END = 255,
  100. };
  101. enum {
  102. MLX5_IB_MMAP_CMD_SHIFT = 8,
  103. MLX5_IB_MMAP_CMD_MASK = 0xff,
  104. };
  105. enum {
  106. MLX5_RES_SCAT_DATA32_CQE = 0x1,
  107. MLX5_RES_SCAT_DATA64_CQE = 0x2,
  108. MLX5_REQ_SCAT_DATA32_CQE = 0x11,
  109. MLX5_REQ_SCAT_DATA64_CQE = 0x22,
  110. };
  111. enum mlx5_ib_mad_ifc_flags {
  112. MLX5_MAD_IFC_IGNORE_MKEY = 1,
  113. MLX5_MAD_IFC_IGNORE_BKEY = 2,
  114. MLX5_MAD_IFC_NET_VIEW = 4,
  115. };
  116. enum {
  117. MLX5_CROSS_CHANNEL_BFREG = 0,
  118. };
  119. enum {
  120. MLX5_CQE_VERSION_V0,
  121. MLX5_CQE_VERSION_V1,
  122. };
  123. enum {
  124. MLX5_TM_MAX_RNDV_MSG_SIZE = 64,
  125. MLX5_TM_MAX_SGE = 1,
  126. };
  127. enum {
  128. MLX5_IB_INVALID_UAR_INDEX = BIT(31),
  129. MLX5_IB_INVALID_BFREG = BIT(31),
  130. };
  131. enum {
  132. MLX5_MAX_MEMIC_PAGES = 0x100,
  133. MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
  134. };
  135. enum {
  136. MLX5_MEMIC_BASE_ALIGN = 6,
  137. MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
  138. };
  139. enum mlx5_ib_mmap_type {
  140. MLX5_IB_MMAP_TYPE_MEMIC = 1,
  141. MLX5_IB_MMAP_TYPE_VAR = 2,
  142. MLX5_IB_MMAP_TYPE_UAR_WC = 3,
  143. MLX5_IB_MMAP_TYPE_UAR_NC = 4,
  144. MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
  145. };
  146. struct mlx5_bfreg_info {
  147. u32 *sys_pages;
  148. int num_low_latency_bfregs;
  149. unsigned int *count;
  150. /*
  151. * protect bfreg allocation data structs
  152. */
  153. struct mutex lock;
  154. u32 ver;
  155. u8 lib_uar_4k : 1;
  156. u8 lib_uar_dyn : 1;
  157. u32 num_sys_pages;
  158. u32 num_static_sys_pages;
  159. u32 total_num_bfregs;
  160. u32 num_dyn_bfregs;
  161. };
  162. struct mlx5_ib_ucontext {
  163. struct ib_ucontext ibucontext;
  164. struct list_head db_page_list;
  165. /* protect doorbell record alloc/free
  166. */
  167. struct mutex db_page_mutex;
  168. struct mlx5_bfreg_info bfregi;
  169. u8 cqe_version;
  170. /* Transport Domain number */
  171. u32 tdn;
  172. u64 lib_caps;
  173. u16 devx_uid;
  174. /* For RoCE LAG TX affinity */
  175. atomic_t tx_port_affinity;
  176. };
/* Map an ib_ucontext back to the mlx5_ib_ucontext that embeds it. */
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
  181. struct mlx5_ib_pd {
  182. struct ib_pd ibpd;
  183. u32 pdn;
  184. u16 uid;
  185. };
  186. enum {
  187. MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
  188. MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
  189. MLX5_IB_FLOW_ACTION_DECAP,
  190. };
  191. #define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
  192. #define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
  193. #if (MLX5_IB_FLOW_LAST_PRIO <= 0)
  194. #error "Invalid number of bypass priorities"
  195. #endif
  196. #define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1)
  197. #define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
  198. #define MLX5_IB_NUM_SNIFFER_FTS 2
  199. #define MLX5_IB_NUM_EGRESS_FTS 1
  200. #define MLX5_IB_NUM_FDB_FTS MLX5_BY_PASS_NUM_REGULAR_PRIOS
  201. struct mlx5_ib_anchor {
  202. struct mlx5_flow_table *ft;
  203. struct mlx5_flow_group *fg_goto_table;
  204. struct mlx5_flow_group *fg_drop;
  205. struct mlx5_flow_handle *rule_goto_table;
  206. struct mlx5_flow_handle *rule_drop;
  207. unsigned int rule_goto_table_ref;
  208. };
  209. struct mlx5_ib_flow_prio {
  210. struct mlx5_flow_table *flow_table;
  211. struct mlx5_ib_anchor anchor;
  212. unsigned int refcount;
  213. };
  214. struct mlx5_ib_flow_handler {
  215. struct list_head list;
  216. struct ib_flow ibflow;
  217. struct mlx5_ib_flow_prio *prio;
  218. struct mlx5_flow_handle *rule;
  219. struct ib_counters *ibcounters;
  220. struct mlx5_ib_dev *dev;
  221. struct mlx5_ib_flow_matcher *flow_matcher;
  222. };
  223. struct mlx5_ib_flow_matcher {
  224. struct mlx5_ib_match_params matcher_mask;
  225. int mask_len;
  226. enum mlx5_ib_flow_type flow_type;
  227. enum mlx5_flow_namespace_type ns_type;
  228. u16 priority;
  229. struct mlx5_core_dev *mdev;
  230. atomic_t usecnt;
  231. u8 match_criteria_enable;
  232. };
  233. struct mlx5_ib_steering_anchor {
  234. struct mlx5_ib_flow_prio *ft_prio;
  235. struct mlx5_ib_dev *dev;
  236. atomic_t usecnt;
  237. };
  238. struct mlx5_ib_pp {
  239. u16 index;
  240. struct mlx5_core_dev *mdev;
  241. };
  242. enum mlx5_ib_optional_counter_type {
  243. MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
  244. MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
  245. MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,
  246. MLX5_IB_OPCOUNTER_MAX,
  247. };
  248. struct mlx5_ib_flow_db {
  249. struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
  250. struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
  251. struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
  252. struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
  253. struct mlx5_ib_flow_prio fdb[MLX5_IB_NUM_FDB_FTS];
  254. struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
  255. struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
  256. struct mlx5_ib_flow_prio opfcs[MLX5_IB_OPCOUNTER_MAX];
  257. struct mlx5_flow_table *lag_demux_ft;
  258. /* Protect flow steering bypass flow tables
  259. * when add/del flow rules.
  260. * only single add/removal of flow steering rule could be done
  261. * simultaneously.
  262. */
  263. struct mutex lock;
  264. };
  265. /* Use macros here so that don't have to duplicate
  266. * enum ib_qp_type for low-level driver
  267. */
  268. #define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
  269. /*
  270. * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
  271. * creates the actual hardware QP.
  272. */
  273. #define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
  274. #define MLX5_IB_QPT_DCI IB_QPT_RESERVED3
  275. #define MLX5_IB_QPT_DCT IB_QPT_RESERVED4
  276. #define MLX5_IB_WR_UMR IB_WR_RESERVED1
  277. #define MLX5_IB_UPD_XLT_ZAP BIT(0)
  278. #define MLX5_IB_UPD_XLT_ENABLE BIT(1)
  279. #define MLX5_IB_UPD_XLT_ATOMIC BIT(2)
  280. #define MLX5_IB_UPD_XLT_ADDR BIT(3)
  281. #define MLX5_IB_UPD_XLT_PD BIT(4)
  282. #define MLX5_IB_UPD_XLT_ACCESS BIT(5)
  283. #define MLX5_IB_UPD_XLT_INDIRECT BIT(6)
  284. /* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
  285. *
  286. * These flags are intended for internal use by the mlx5_ib driver, and they
  287. * rely on the range reserved for that use in the ib_qp_create_flags enum.
  288. */
  289. #define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
  290. #define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1)
  291. struct wr_list {
  292. u16 opcode;
  293. u16 next;
  294. };
  295. enum mlx5_ib_rq_flags {
  296. MLX5_IB_RQ_CVLAN_STRIPPING = 1 << 0,
  297. MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
  298. };
  299. struct mlx5_ib_wq {
  300. struct mlx5_frag_buf_ctrl fbc;
  301. u64 *wrid;
  302. u32 *wr_data;
  303. struct wr_list *w_list;
  304. unsigned *wqe_head;
  305. u16 unsig_count;
  306. /* serialize post to the work queue
  307. */
  308. spinlock_t lock;
  309. int wqe_cnt;
  310. int max_post;
  311. int max_gs;
  312. int offset;
  313. int wqe_shift;
  314. unsigned head;
  315. unsigned tail;
  316. u16 cur_post;
  317. u16 last_poll;
  318. void *cur_edge;
  319. };
  320. enum mlx5_ib_wq_flags {
  321. MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
  322. MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
  323. };
  324. #define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
  325. #define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
  326. #define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
  327. #define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
  328. #define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
  329. struct mlx5_ib_rwq {
  330. struct ib_wq ibwq;
  331. struct mlx5_core_qp core_qp;
  332. u32 rq_num_pas;
  333. u32 log_rq_stride;
  334. u32 log_rq_size;
  335. u32 rq_page_offset;
  336. u32 log_page_size;
  337. u32 log_num_strides;
  338. u32 two_byte_shift_en;
  339. u32 single_stride_log_num_of_bytes;
  340. struct ib_umem *umem;
  341. size_t buf_size;
  342. unsigned int page_shift;
  343. struct mlx5_db db;
  344. u32 user_index;
  345. u32 wqe_count;
  346. u32 wqe_shift;
  347. int wq_sig;
  348. u32 create_flags; /* Use enum mlx5_ib_wq_flags */
  349. };
  350. struct mlx5_ib_rwq_ind_table {
  351. struct ib_rwq_ind_table ib_rwq_ind_tbl;
  352. u32 rqtn;
  353. u16 uid;
  354. };
  355. struct mlx5_ib_ubuffer {
  356. struct ib_umem *umem;
  357. int buf_size;
  358. u64 buf_addr;
  359. };
  360. struct mlx5_ib_qp_base {
  361. struct mlx5_ib_qp *container_mibqp;
  362. struct mlx5_core_qp mqp;
  363. struct mlx5_ib_ubuffer ubuffer;
  364. };
  365. struct mlx5_ib_qp_trans {
  366. struct mlx5_ib_qp_base base;
  367. u16 xrcdn;
  368. u32 alt_port;
  369. u8 atomic_rd_en;
  370. u8 resp_depth;
  371. };
  372. struct mlx5_ib_rss_qp {
  373. u32 tirn;
  374. };
  375. struct mlx5_ib_rq {
  376. struct mlx5_ib_qp_base base;
  377. struct mlx5_ib_wq *rq;
  378. struct mlx5_ib_ubuffer ubuffer;
  379. struct mlx5_db *doorbell;
  380. u32 tirn;
  381. u8 state;
  382. u32 flags;
  383. };
  384. struct mlx5_ib_sq {
  385. struct mlx5_ib_qp_base base;
  386. struct mlx5_ib_wq *sq;
  387. struct mlx5_ib_ubuffer ubuffer;
  388. struct mlx5_db *doorbell;
  389. struct mlx5_flow_handle *flow_rule;
  390. u32 tisn;
  391. u8 state;
  392. };
  393. struct mlx5_ib_raw_packet_qp {
  394. struct mlx5_ib_sq sq;
  395. struct mlx5_ib_rq rq;
  396. };
  397. struct mlx5_bf {
  398. int buf_size;
  399. unsigned long offset;
  400. struct mlx5_sq_bfreg *bfreg;
  401. };
  402. struct mlx5_ib_dct {
  403. struct mlx5_core_dct mdct;
  404. u32 *in;
  405. };
  406. struct mlx5_ib_gsi_qp {
  407. struct ib_qp *rx_qp;
  408. u32 port_num;
  409. struct ib_qp_cap cap;
  410. struct ib_cq *cq;
  411. struct mlx5_ib_gsi_wr *outstanding_wrs;
  412. u32 outstanding_pi, outstanding_ci;
  413. int num_qps;
  414. /* Protects access to the tx_qps. Post send operations synchronize
  415. * with tx_qp creation in setup_qp(). Also protects the
  416. * outstanding_wrs array and indices.
  417. */
  418. spinlock_t lock;
  419. struct ib_qp **tx_qps;
  420. };
struct mlx5_ib_qp {
	struct ib_qp ibqp;
	/* Flavor-specific state; which member is active depends on the QP
	 * type stored in 'type' below.
	 */
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
		struct mlx5_ib_gsi_qp gsi;
	};
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
	struct mlx5_ib_wq rq;
	u8 sq_signal_bits;
	u8 next_fence;
	struct mlx5_ib_wq sq;
	/* serialize qp state modifications
	 */
	struct mutex mutex;
	/* cached variant of create_flags from struct ib_qp_init_attr */
	u32 flags;
	u32 port;
	u8 state;
	int max_inline_data;
	struct mlx5_bf bf;
	u8 has_rq:1;
	u8 is_rss:1;
	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int bfregn;
	struct list_head qps_list;
	struct list_head cq_recv_list;
	struct list_head cq_send_list;
	struct mlx5_rate_limit rl;
	u32 underlay_qpn;
	u32 flags_en;
	/*
	 * IB/core doesn't store low-level QP types, so
	 * store both MLX and IBTA types in the field below.
	 */
	enum ib_qp_type type;
	/* A new counter has been configured for this QP but has not yet
	 * taken effect.
	 */
	u32 counter_pending;
	u16 gsi_lag_port;
};
  468. struct mlx5_ib_cq_buf {
  469. struct mlx5_frag_buf_ctrl fbc;
  470. struct mlx5_frag_buf frag_buf;
  471. struct ib_umem *umem;
  472. int cqe_size;
  473. int nent;
  474. };
  475. enum mlx5_ib_cq_pr_flags {
  476. MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
  477. MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
  478. };
  479. struct mlx5_ib_cq {
  480. struct ib_cq ibcq;
  481. struct mlx5_core_cq mcq;
  482. struct mlx5_ib_cq_buf buf;
  483. struct mlx5_db db;
  484. /* serialize access to the CQ
  485. */
  486. spinlock_t lock;
  487. /* protect resize cq
  488. */
  489. struct mutex resize_mutex;
  490. struct mlx5_ib_cq_buf *resize_buf;
  491. struct ib_umem *resize_umem;
  492. int cqe_size;
  493. struct list_head list_send_qp;
  494. struct list_head list_recv_qp;
  495. u32 create_flags;
  496. struct list_head wc_list;
  497. enum ib_cq_notify_flags notify_flags;
  498. struct work_struct notify_work;
  499. u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
  500. };
  501. struct mlx5_ib_wc {
  502. struct ib_wc wc;
  503. struct list_head list;
  504. };
struct mlx5_ib_srq {
	struct ib_srq ibsrq;
	struct mlx5_core_srq msrq;
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
	struct mlx5_frag_buf_ctrl fbc;
	u64 *wrid;
	/* protect SRQ handling
	 */
	spinlock_t lock;
	int head;
	int tail;
	u16 wqe_ctr;
	struct ib_umem *umem;
	/* serialize arming a SRQ
	 */
	struct mutex mutex;
	int wq_sig;
};
  524. struct mlx5_ib_xrcd {
  525. struct ib_xrcd ibxrcd;
  526. u32 xrcdn;
  527. };
  528. enum mlx5_ib_mtt_access_flags {
  529. MLX5_IB_MTT_READ = (1 << 0),
  530. MLX5_IB_MTT_WRITE = (1 << 1),
  531. };
  532. struct mlx5_user_mmap_entry {
  533. struct rdma_user_mmap_entry rdma_entry;
  534. u8 mmap_flag;
  535. u64 address;
  536. u32 page_idx;
  537. };
  538. enum mlx5_mkey_type {
  539. MLX5_MKEY_MR = 1,
  540. MLX5_MKEY_MW,
  541. MLX5_MKEY_INDIRECT_DEVX,
  542. };
  543. struct mlx5_ib_mkey {
  544. u32 key;
  545. enum mlx5_mkey_type type;
  546. unsigned int ndescs;
  547. struct wait_queue_head wait;
  548. refcount_t usecount;
  549. struct mlx5_cache_ent *cache_ent;
  550. };
  551. #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
  552. #define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
  553. IB_ACCESS_REMOTE_WRITE |\
  554. IB_ACCESS_REMOTE_READ |\
  555. IB_ACCESS_REMOTE_ATOMIC |\
  556. IB_ZERO_BASED)
  557. #define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
  558. IB_ACCESS_REMOTE_WRITE |\
  559. IB_ACCESS_REMOTE_READ |\
  560. IB_ZERO_BASED)
  561. #define mlx5_update_odp_stats(mr, counter_name, value) \
  562. atomic64_add(value, &((mr)->odp_stats.counter_name))
  563. struct mlx5_ib_mr {
  564. struct ib_mr ibmr;
  565. struct mlx5_ib_mkey mmkey;
  566. struct ib_umem *umem;
  567. union {
  568. /* Used only by kernel MRs (umem == NULL) */
  569. struct {
  570. void *descs;
  571. void *descs_alloc;
  572. dma_addr_t desc_map;
  573. int max_descs;
  574. int desc_size;
  575. int access_mode;
  576. /* For Kernel IB_MR_TYPE_INTEGRITY */
  577. struct mlx5_core_sig_ctx *sig;
  578. struct mlx5_ib_mr *pi_mr;
  579. struct mlx5_ib_mr *klm_mr;
  580. struct mlx5_ib_mr *mtt_mr;
  581. u64 data_iova;
  582. u64 pi_iova;
  583. int meta_ndescs;
  584. int meta_length;
  585. int data_length;
  586. };
  587. /* Used only by User MRs (umem != NULL) */
  588. struct {
  589. unsigned int page_shift;
  590. /* Current access_flags */
  591. int access_flags;
  592. /* For User ODP */
  593. struct mlx5_ib_mr *parent;
  594. struct xarray implicit_children;
  595. union {
  596. struct work_struct work;
  597. } odp_destroy;
  598. struct ib_odp_counters odp_stats;
  599. bool is_odp_implicit;
  600. };
  601. };
  602. };
  603. static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
  604. {
  605. return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
  606. mr->umem->is_odp;
  607. }
  608. static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
  609. {
  610. return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
  611. mr->umem->is_dmabuf;
  612. }
  613. struct mlx5_ib_mw {
  614. struct ib_mw ibmw;
  615. struct mlx5_ib_mkey mmkey;
  616. };
  617. struct mlx5_ib_umr_context {
  618. struct ib_cqe cqe;
  619. enum ib_wc_status status;
  620. struct completion done;
  621. };
  622. enum {
  623. MLX5_UMR_STATE_UNINIT,
  624. MLX5_UMR_STATE_ACTIVE,
  625. MLX5_UMR_STATE_RECOVER,
  626. MLX5_UMR_STATE_ERR,
  627. };
  628. struct umr_common {
  629. struct ib_pd *pd;
  630. struct ib_cq *cq;
  631. struct ib_qp *qp;
  632. /* Protects from UMR QP overflow
  633. */
  634. struct semaphore sem;
  635. /* Protects from using UMR while the UMR is not active
  636. */
  637. struct mutex lock;
  638. unsigned int state;
  639. };
  640. struct mlx5_cache_ent {
  641. struct xarray mkeys;
  642. unsigned long stored;
  643. unsigned long reserved;
  644. char name[4];
  645. u32 order;
  646. u32 access_mode;
  647. u32 page;
  648. unsigned int ndescs;
  649. u8 disabled:1;
  650. u8 fill_to_high_water:1;
  651. /*
  652. * - limit is the low water mark for stored mkeys, 2* limit is the
  653. * upper water mark.
  654. */
  655. u32 in_use;
  656. u32 limit;
  657. /* Statistics */
  658. u32 miss;
  659. struct mlx5_ib_dev *dev;
  660. struct delayed_work dwork;
  661. };
  662. struct mlx5r_async_create_mkey {
  663. union {
  664. u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
  665. u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
  666. };
  667. struct mlx5_async_work cb_work;
  668. struct mlx5_cache_ent *ent;
  669. u32 mkey;
  670. };
  671. struct mlx5_mkey_cache {
  672. struct workqueue_struct *wq;
  673. struct mlx5_cache_ent ent[MAX_MKEY_CACHE_ENTRIES];
  674. struct dentry *root;
  675. unsigned long last_add;
  676. };
  677. struct mlx5_ib_port_resources {
  678. struct mlx5_ib_gsi_qp *gsi;
  679. struct work_struct pkey_change_work;
  680. };
  681. struct mlx5_ib_resources {
  682. struct ib_cq *c0;
  683. u32 xrcdn0;
  684. u32 xrcdn1;
  685. struct ib_pd *p0;
  686. struct ib_srq *s0;
  687. struct ib_srq *s1;
  688. struct mlx5_ib_port_resources ports[2];
  689. };
  690. #define MAX_OPFC_RULES 2
  691. struct mlx5_ib_op_fc {
  692. struct mlx5_fc *fc;
  693. struct mlx5_flow_handle *rule[MAX_OPFC_RULES];
  694. };
  695. struct mlx5_ib_counters {
  696. struct rdma_stat_desc *descs;
  697. size_t *offsets;
  698. u32 num_q_counters;
  699. u32 num_cong_counters;
  700. u32 num_ext_ppcnt_counters;
  701. u32 num_op_counters;
  702. u16 set_id;
  703. struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
  704. };
  705. int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
  706. struct mlx5_ib_op_fc *opfc,
  707. enum mlx5_ib_optional_counter_type type);
  708. void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
  709. struct mlx5_ib_op_fc *opfc,
  710. enum mlx5_ib_optional_counter_type type);
  711. struct mlx5_ib_multiport_info;
  712. struct mlx5_ib_multiport {
  713. struct mlx5_ib_multiport_info *mpi;
  714. /* To be held when accessing the multiport info */
  715. spinlock_t mpi_lock;
  716. };
  717. struct mlx5_roce {
  718. /* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
  719. * netdev pointer
  720. */
  721. rwlock_t netdev_lock;
  722. struct net_device *netdev;
  723. struct notifier_block nb;
  724. atomic_t tx_port_affinity;
  725. enum ib_port_state last_port_state;
  726. struct mlx5_ib_dev *dev;
  727. u32 native_port_num;
  728. };
  729. struct mlx5_ib_port {
  730. struct mlx5_ib_counters cnts;
  731. struct mlx5_ib_multiport mp;
  732. struct mlx5_ib_dbg_cc_params *dbg_cc_params;
  733. struct mlx5_roce roce;
  734. struct mlx5_eswitch_rep *rep;
  735. };
  736. struct mlx5_ib_dbg_param {
  737. int offset;
  738. struct mlx5_ib_dev *dev;
  739. struct dentry *dentry;
  740. u32 port_num;
  741. };
  742. enum mlx5_ib_dbg_cc_types {
  743. MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
  744. MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
  745. MLX5_IB_DBG_CC_RP_TIME_RESET,
  746. MLX5_IB_DBG_CC_RP_BYTE_RESET,
  747. MLX5_IB_DBG_CC_RP_THRESHOLD,
  748. MLX5_IB_DBG_CC_RP_AI_RATE,
  749. MLX5_IB_DBG_CC_RP_MAX_RATE,
  750. MLX5_IB_DBG_CC_RP_HAI_RATE,
  751. MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
  752. MLX5_IB_DBG_CC_RP_MIN_RATE,
  753. MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
  754. MLX5_IB_DBG_CC_RP_DCE_TCP_G,
  755. MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
  756. MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
  757. MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
  758. MLX5_IB_DBG_CC_RP_GD,
  759. MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
  760. MLX5_IB_DBG_CC_NP_CNP_DSCP,
  761. MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
  762. MLX5_IB_DBG_CC_NP_CNP_PRIO,
  763. MLX5_IB_DBG_CC_MAX,
  764. };
  765. struct mlx5_ib_dbg_cc_params {
  766. struct dentry *root;
  767. struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
  768. };
  769. enum {
  770. MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
  771. };
  772. struct mlx5_ib_delay_drop {
  773. struct mlx5_ib_dev *dev;
  774. struct work_struct delay_drop_work;
  775. /* serialize setting of delay drop */
  776. struct mutex lock;
  777. u32 timeout;
  778. bool activate;
  779. atomic_t events_cnt;
  780. atomic_t rqs_cnt;
  781. struct dentry *dir_debugfs;
  782. };
  783. enum mlx5_ib_stages {
  784. MLX5_IB_STAGE_INIT,
  785. MLX5_IB_STAGE_FS,
  786. MLX5_IB_STAGE_CAPS,
  787. MLX5_IB_STAGE_NON_DEFAULT_CB,
  788. MLX5_IB_STAGE_ROCE,
  789. MLX5_IB_STAGE_QP,
  790. MLX5_IB_STAGE_SRQ,
  791. MLX5_IB_STAGE_DEVICE_RESOURCES,
  792. MLX5_IB_STAGE_DEVICE_NOTIFIER,
  793. MLX5_IB_STAGE_ODP,
  794. MLX5_IB_STAGE_COUNTERS,
  795. MLX5_IB_STAGE_CONG_DEBUGFS,
  796. MLX5_IB_STAGE_UAR,
  797. MLX5_IB_STAGE_BFREG,
  798. MLX5_IB_STAGE_PRE_IB_REG_UMR,
  799. MLX5_IB_STAGE_WHITELIST_UID,
  800. MLX5_IB_STAGE_IB_REG,
  801. MLX5_IB_STAGE_POST_IB_REG_UMR,
  802. MLX5_IB_STAGE_DELAY_DROP,
  803. MLX5_IB_STAGE_RESTRACK,
  804. MLX5_IB_STAGE_MAX,
  805. };
  806. struct mlx5_ib_stage {
  807. int (*init)(struct mlx5_ib_dev *dev);
  808. void (*cleanup)(struct mlx5_ib_dev *dev);
  809. };
  810. #define STAGE_CREATE(_stage, _init, _cleanup) \
  811. .stage[_stage] = {.init = _init, .cleanup = _cleanup}
  812. struct mlx5_ib_profile {
  813. struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
  814. };
  815. struct mlx5_ib_multiport_info {
  816. struct list_head list;
  817. struct mlx5_ib_dev *ibdev;
  818. struct mlx5_core_dev *mdev;
  819. struct notifier_block mdev_events;
  820. struct completion unref_comp;
  821. u64 sys_image_guid;
  822. u32 mdev_refcnt;
  823. bool is_master;
  824. bool unaffiliate;
  825. };
/*
 * Driver wrapper around an ib_flow_action.  The anonymous union holds
 * either ESP/AES-GCM crypto state or a raw modify-header/packet-reformat
 * action, depending on how the action was created.
 */
struct mlx5_ib_flow_action {
	struct ib_flow_action		ib_action;
	union {
		struct {
			u64			    ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			u32		    sub_type;
			union {
				struct mlx5_modify_hdr	 *modify_hdr;
				struct mlx5_pkt_reformat *pkt_reformat;
			};
		} flow_action_raw;
	};
};
/* Device-memory (MEMIC) allocator state. */
struct mlx5_dm {
	struct mlx5_core_dev *dev;
	/* This lock is used to protect the access to the shared
	 * allocation map when concurrent requests by different
	 * processes are handled.
	 */
	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};
/* Arguments for a counters read_counters() callback. */
struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;	/* HW flow-counter handle to read */
	u64 *out;			/* caller-provided output buffer */
	u32 flags;
};

/* Kinds of ib_counters objects this driver implements. */
enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};
/* Driver state embedding an ib_counters object (see to_mcounters()). */
struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};
/* Convert a core ib_counters object to the mlx5 structure embedding it. */
static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}
  883. int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
  884. bool is_egress,
  885. struct mlx5_flow_act *action);
/* Loopback (self-traffic) enable state, toggled via mlx5_ib_{en,dis}able_lb(). */
struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex		mutex;
	u32			user_td;	/* user transport-domain refcount */
	int			qps;
	bool			enabled;
};
/* ODP page-fault event queue context (see odp_pf_eq in mlx5_ib_dev). */
struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;
	struct mlx5_ib_dev *dev;
	struct mlx5_eq *core;
	struct work_struct work;
	spinlock_t lock; /* Pagefaults spinlock */
	struct workqueue_struct *wq;
	mempool_t *pool;
};
/* DEVX event subscription table, keyed in the xarray. */
struct mlx5_devx_event_table {
	struct mlx5_nb devx_nb;
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;
};

/* Allocation state for HW VAR entries exposed to userspace. */
struct mlx5_var_table {
	/* serialize updating the bitmap */
	struct mutex bitmap_lock;
	unsigned long *bitmap;		/* one bit per VAR entry */
	u64 hw_start_addr;
	u32 stride_size;
	u64 num_var_hw_entries;
};
/* Cached per-port capability bits. */
struct mlx5_port_caps {
	bool has_smi;
	u8 ext_port_cap;
};
/* Per-device driver state; embeds the ib_device registered with the RDMA core. */
struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct notifier_block		mdev_events;
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	u8				ib_active:1;
	u8				is_rep:1;
	u8				lag_active:1;
	u8				wc_support:1;
	u8				fill_delay;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	atomic_t			mkey_var;
	struct mlx5_mkey_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
	/* ODP (on-demand paging) state */
	struct ib_odp_caps		odp_caps;
	u64				odp_max_size;
	struct mutex			odp_eq_mutex;	/* protects odp_pf_eq setup */
	struct mlx5_ib_pf_eq		odp_pf_eq;
	struct xarray			odp_mkeys;	/* see mlx5r_store_odp_mkey() */
	u32				null_mkey;
	struct mlx5_ib_flow_db		*flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t			reset_flow_resource_lock;
	struct list_head		qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port		*port;
	struct mlx5_sq_bfreg		bfreg;
	struct mlx5_sq_bfreg		wc_bfreg;	/* write-combining bfreg */
	struct mlx5_sq_bfreg		fp_bfreg;	/* fast-path bfreg */
	struct mlx5_ib_delay_drop	delay_drop;
	const struct mlx5_ib_profile	*profile;
	struct mlx5_ib_lb_state		lb;
	u8				umr_fence;
	struct list_head		ib_dev_list;
	u64				sys_image_guid;
	struct mlx5_dm			dm;
	u16				devx_whitelist_uid;
	struct mlx5_srq_table		srq_table;
	struct mlx5_qp_table		qp_table;
	struct mlx5_async_ctx		async_ctx;
	struct mlx5_devx_event_table	devx_event_table;
	struct mlx5_var_table		var_table;
	struct xarray			sig_mrs;
	struct mlx5_port_caps		port_caps[MLX5_MAX_PORTS];
	u16				pkey_table_len;
	u8				lag_ports;
};
/*
 * container_of() conversion helpers: recover the driver structure that
 * embeds a given core (mlx5_core_*) or uverbs (ib_*) object.
 */
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr)
{
	return to_mdev(mr->ibmr.device);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

/* The core QP is embedded in a qp_base, which points back at the owning QP. */
static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
			    struct mlx5_user_mmap_entry, rdma_entry);
}
  1052. int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
  1053. struct mlx5_db *db);
  1054. void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
  1055. void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
  1056. void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
  1057. void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
  1058. int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
  1059. struct ib_udata *udata);
  1060. int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
/* Nothing to release for an AH in this driver; destroy always succeeds. */
static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}
  1065. int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
  1066. struct ib_udata *udata);
  1067. int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
  1068. enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
  1069. int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
  1070. int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
  1071. int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
  1072. const struct ib_recv_wr **bad_wr);
  1073. int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
  1074. void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
  1075. int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
  1076. struct ib_udata *udata);
  1077. int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  1078. int attr_mask, struct ib_udata *udata);
  1079. int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
  1080. struct ib_qp_init_attr *qp_init_attr);
  1081. int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
  1082. void mlx5_ib_drain_sq(struct ib_qp *qp);
  1083. void mlx5_ib_drain_rq(struct ib_qp *qp);
  1084. int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
  1085. size_t buflen, size_t *bc);
  1086. int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
  1087. size_t buflen, size_t *bc);
  1088. int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
  1089. size_t buflen, size_t *bc);
  1090. int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
  1091. struct ib_udata *udata);
  1092. int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
  1093. int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
  1094. int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
  1095. int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
  1096. int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
  1097. struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
  1098. struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
  1099. u64 virt_addr, int access_flags,
  1100. struct ib_udata *udata);
  1101. struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
  1102. u64 length, u64 virt_addr,
  1103. int fd, int access_flags,
  1104. struct ib_udata *udata);
  1105. int mlx5_ib_advise_mr(struct ib_pd *pd,
  1106. enum ib_uverbs_advise_mr_advice advice,
  1107. u32 flags,
  1108. struct ib_sge *sg_list,
  1109. u32 num_sge,
  1110. struct uverbs_attr_bundle *attrs);
  1111. int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
  1112. int mlx5_ib_dealloc_mw(struct ib_mw *mw);
  1113. struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
  1114. int access_flags);
  1115. void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
  1116. void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
  1117. struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
  1118. u64 length, u64 virt_addr, int access_flags,
  1119. struct ib_pd *pd, struct ib_udata *udata);
  1120. int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
  1121. struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
  1122. u32 max_num_sg);
  1123. struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
  1124. u32 max_num_sg,
  1125. u32 max_num_meta_sg);
  1126. int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
  1127. unsigned int *sg_offset);
  1128. int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
  1129. int data_sg_nents, unsigned int *data_sg_offset,
  1130. struct scatterlist *meta_sg, int meta_sg_nents,
  1131. unsigned int *meta_sg_offset);
  1132. int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
  1133. const struct ib_wc *in_wc, const struct ib_grh *in_grh,
  1134. const struct ib_mad *in, struct ib_mad *out,
  1135. size_t *out_mad_size, u16 *out_mad_pkey_index);
  1136. int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
  1137. int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
  1138. int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port);
  1139. int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
  1140. __be64 *sys_image_guid);
  1141. int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
  1142. u16 *max_pkeys);
  1143. int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
  1144. u32 *vendor_id);
  1145. int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
  1146. int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
  1147. int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
  1148. u16 *pkey);
  1149. int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
  1150. union ib_gid *gid);
  1151. int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
  1152. struct ib_port_attr *props);
  1153. int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
  1154. struct ib_port_attr *props);
  1155. void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
  1156. u64 access_flags);
  1157. void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
  1158. int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
  1159. int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
  1160. int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
  1161. struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
  1162. struct mlx5_cache_ent *ent,
  1163. int access_flags);
  1164. int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
  1165. struct ib_mr_status *mr_status);
  1166. struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
  1167. struct ib_wq_init_attr *init_attr,
  1168. struct ib_udata *udata);
  1169. int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
  1170. int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
  1171. u32 wq_attr_mask, struct ib_udata *udata);
  1172. int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
  1173. struct ib_rwq_ind_table_init_attr *init_attr,
  1174. struct ib_udata *udata);
  1175. int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
  1176. struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
  1177. struct ib_dm_mr_attr *attr,
  1178. struct uverbs_attr_bundle *attrs);
  1179. #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
  1180. int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
  1181. int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
  1182. void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
  1183. int __init mlx5_ib_odp_init(void);
  1184. void mlx5_ib_odp_cleanup(void);
  1185. void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent);
  1186. void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
  1187. struct mlx5_ib_mr *mr, int flags);
  1188. int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
  1189. enum ib_uverbs_advise_mr_advice advice,
  1190. u32 flags, struct ib_sge *sg_list, u32 num_sge);
  1191. int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
  1192. int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
  1193. #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
/*
 * CONFIG_INFINIBAND_ON_DEMAND_PAGING disabled: no-op stubs so callers
 * need no ifdefs.  Prefetch and ODP/dmabuf MR init report -EOPNOTSUPP;
 * the init/cleanup hooks succeed silently.
 */
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_pf_eq *eq)
{
	return 0;
}
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					 struct mlx5_ib_mr *mr, int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
  1221. #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
  1222. extern const struct mmu_interval_notifier_ops mlx5_mn_ops;
  1223. /* Needed for rep profile */
  1224. void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
  1225. const struct mlx5_ib_profile *profile,
  1226. int stage);
  1227. int __mlx5_ib_add(struct mlx5_ib_dev *dev,
  1228. const struct mlx5_ib_profile *profile);
  1229. int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
  1230. u32 port, struct ifla_vf_info *info);
  1231. int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
  1232. u32 port, int state);
  1233. int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
  1234. u32 port, struct ifla_vf_stats *stats);
  1235. int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
  1236. struct ifla_vf_guid *node_guid,
  1237. struct ifla_vf_guid *port_guid);
  1238. int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
  1239. u64 guid, int type);
  1240. __be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
  1241. const struct ib_gid_attr *attr);
  1242. void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
  1243. void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
  1244. /* GSI QP helper functions */
  1245. int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
  1246. struct ib_qp_init_attr *attr);
  1247. int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
  1248. int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
  1249. int attr_mask);
  1250. int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
  1251. int qp_attr_mask,
  1252. struct ib_qp_init_attr *qp_init_attr);
  1253. int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
  1254. const struct ib_send_wr **bad_wr);
  1255. int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
  1256. const struct ib_recv_wr **bad_wr);
  1257. void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
  1258. int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
  1259. void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
  1260. int bfregn);
  1261. struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
  1262. struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
  1263. u32 ib_port_num,
  1264. u32 *native_port_num);
  1265. void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
  1266. u32 port_num);
  1267. extern const struct uapi_definition mlx5_ib_devx_defs[];
  1268. extern const struct uapi_definition mlx5_ib_flow_defs[];
  1269. extern const struct uapi_definition mlx5_ib_qos_defs[];
  1270. extern const struct uapi_definition mlx5_ib_std_types_defs[];
  1271. static inline int is_qp1(enum ib_qp_type qp_type)
  1272. {
  1273. return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
  1274. }
  1275. static inline u32 check_cq_create_flags(u32 flags)
  1276. {
  1277. /*
  1278. * It returns non-zero value for unsupported CQ
  1279. * create flags, otherwise it returns zero.
  1280. */
  1281. return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
  1282. IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
  1283. }
  1284. static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
  1285. u32 *user_index)
  1286. {
  1287. if (cqe_version) {
  1288. if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
  1289. (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
  1290. return -EINVAL;
  1291. *user_index = cmd_uidx;
  1292. } else {
  1293. *user_index = MLX5_IB_DEFAULT_UIDX;
  1294. }
  1295. return 0;
  1296. }
/*
 * Extract the user index from a create_qp command buffer.
 *
 * Returns 0 with *user_index set, or -EINVAL when the presence of the uidx
 * field in the command (judged by @inlen) does not match the context's
 * cqe_version.  Mirrors get_srq_user_index().
 */
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	/* No cqe_version and the default index was passed: accept as-is. */
	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	/* The command must carry uidx iff the context has a cqe_version. */
	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
/*
 * Extract the user index from a create_srq command buffer.
 *
 * Returns 0 with *user_index set, or -EINVAL when the presence of the uidx
 * field in the command (judged by @inlen) does not match the context's
 * cqe_version.  Mirrors get_qp_user_index().
 */
static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	/* No cqe_version and the default index was passed: accept as-is. */
	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	/* The command must carry uidx iff the context has a cqe_version. */
	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
  1323. static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
  1324. {
  1325. return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
  1326. MLX5_UARS_IN_PAGE : 1;
  1327. }
  1328. extern void *xlt_emergency_page;
  1329. int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
  1330. struct mlx5_bfreg_info *bfregi, u32 bfregn,
  1331. bool dyn_bfreg);
/*
 * Publish @mmkey in the device's ODP mkey xarray, keyed by its base mkey,
 * with usecount initialized to one.  Returns 0 or a negative xa_err() code.
 */
static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
				       struct mlx5_ib_mkey *mmkey)
{
	refcount_set(&mmkey->usecount, 1);

	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
			       mmkey, GFP_KERNEL));
}
/* deref an mkey that can participate in ODP flow */
static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	/* Dropping the last reference wakes mlx5r_deref_wait_odp_mkey() waiters. */
	if (refcount_dec_and_test(&mmkey->usecount))
		wake_up(&mmkey->wait);
}
/* deref an mkey that can participate in ODP flow and wait for release */
static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	mlx5r_deref_odp_mkey(mmkey);
	/* Block until every remaining holder has also dropped its reference. */
	wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
}
  1351. int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
  1352. static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
  1353. {
  1354. /*
  1355. * If the driver is in hash mode and the port_select_flow_table_bypass cap
  1356. * is supported, it means that the driver no longer needs to assign the port
  1357. * affinity by default. If a user wants to set the port affinity explicitly,
  1358. * the user has a dedicated API to do that, so there is no need to assign
  1359. * the port affinity by default.
  1360. */
  1361. if (dev->lag_active &&
  1362. mlx5_lag_mode_is_hash(dev->mdev) &&
  1363. MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
  1364. return 0;
  1365. if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
  1366. return 0;
  1367. return dev->lag_active ||
  1368. (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
  1369. MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
  1370. }
  1371. static inline bool rt_supported(int ts_cap)
  1372. {
  1373. return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
  1374. ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
  1375. }
/*
 * PCI Peer to Peer is a trainwreck. If no switch is present then things
 * sometimes work, depending on the pci_distance_p2p logic for excluding broken
 * root complexes. However if a switch is present in the path, then things get
 * really ugly depending on how the switch is setup. This table assumes that the
 * root complex is strict and is validating that all req/reps are matches
 * perfectly - so any scenario where it sees only half the transaction is a
 * failure.
 *
 * CR/RR/DT  ATS RO P2P
 * 00X       X   X  OK
 * 010       X   X  fails (request is routed to root but root never sees comp)
 * 011       0   X  fails (request is routed to root but root never sees comp)
 * 011       1   X  OK
 * 10X       X   1  OK
 * 101       X   0  fails (completion is routed to root but root didn't see req)
 * 110       X   0  SLOW
 * 111       0   0  SLOW
 * 111       1   0  fails (completion is routed to root but root didn't see req)
 * 111       1   1  OK
 *
 * Unfortunately we cannot reliably know if a switch is present or what the
 * CR/RR/DT ACS settings are, as in a VM that is all hidden. Assume that
 * CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows.
 *
 * For now assume if the umem is a dma_buf then it is P2P.
 */
static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
				       struct ib_umem *umem, int access_flags)
{
	/* ATS only applies to dma-buf umems on HW advertising the ats cap. */
	if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf)
		return false;
	/* Per the table above, only relaxed-ordering mappings should use ATS. */
	return access_flags & IB_ACCESS_RELAXED_ORDERING;
}
  1410. #endif /* MLX5_IB_H */