// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
			       u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
				     struct ib_sge *sge, u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8, 0);
	}
}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int irdma_nop_1(struct irdma_qp_uk *qp)
{
	u64 hdr;
	__le64 *wqe;
	u32 wqe_idx;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return -EINVAL;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;
	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	/* make sure WQE is written before valid bit is set */
	dma_wmb();

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	struct irdma_qp_quanta *sq;
	u32 wqe_idx;

	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		sq = qp->sq_base + wqe_idx;
		/* Fill the next 128-WQE chunk with the inverse of the
		 * current polarity so stale valid bits are never mistaken
		 * for new WQEs; the pattern flips when the chunk being
		 * cleared wraps back to index 0, where polarity toggles.
		 */
		if (wqe_idx)
			memset(sq, qp->swqe_polarity ? 0 : 0xFF,
			       128 * sizeof(*sq));
		else
			memset(sq, qp->swqe_polarity ? 0xFF : 0,
			       128 * sizeof(*sq));
	}
}

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow */
	mb();

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, 0, &temp);

	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != qp->initial_ring.head) {
		if (qp->push_dropped) {
			writel(qp->qp_id, qp->wqe_alloc_db);
			qp->push_dropped = false;
		} else if (sw_sq_head != hw_sq_tail) {
			if (sw_sq_head > qp->initial_ring.head) {
				if (hw_sq_tail >= qp->initial_ring.head &&
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			} else {
				if (hw_sq_tail >= qp->initial_ring.head ||
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}
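
/*
 * Explanatory note (not driver logic): in irdma_uk_qp_post_wr() above,
 * hw_sq_tail is the SQ position HW last reported in the shadow area,
 * initial_ring.head is the SQ head as of the previous doorbell, and
 * sw_sq_head is the new head. The doorbell is rung only when HW may have
 * gone idle inside the span of newly posted WQEs. Worked example with a
 * ring of size 64: if the previous doorbell left the head at 60 and new
 * WQEs wrap it around to 4, the wrap branch rings the doorbell when
 * hw_sq_tail >= 60 or hw_sq_tail < 4.
 */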

/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0,
		      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
	qp->initial_ring.head = qp->sq_ring.head;
	qp->push_mode = true;
	qp->push_dropped = false;
}

/**
 * irdma_qp_push_wqe - copy wqe to the push page and ring the push doorbell
 * @qp: hw qp ptr
 * @wqe: wqe to copy
 * @quanta: size of wqe in quanta
 * @wqe_idx: wqe index
 * @post_sq: ring the regular doorbell instead when push is not used
 */
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		       u32 wqe_idx, bool post_sq)
{
	__le64 *push;

	if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
	    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
	    !qp->push_mode) {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	} else {
		push = (__le64 *)((uintptr_t)qp->push_wqe +
				  (wqe_idx & 0x7) * 0x20);
		memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
		irdma_qp_ring_push_db(qp, wqe_idx);
	}
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: size of WR in quanta
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u32 nop_wqe_idx;
	u16 avail_quanta;
	u16 i;

	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
		       (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
		       qp->uk_attrs->max_hw_sq_chunk);
	if (quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (quanta + avail_quanta >
		    IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
		if (qp->push_db && info->push_wqe)
			irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
					  avail_quanta, nop_wqe_idx, true);
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;

	return wqe;
}

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	int ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is the number of 32-byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	read_fence |= info->read_fence;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}
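
/*
 * Usage sketch (illustrative only, not driver code): a kernel consumer
 * posting a two-fragment RDMA write through the routine above might fill
 * the request as below. Field names follow struct irdma_post_sq_info and
 * struct irdma_rdma_write as used above; the DMA addresses, keys, and
 * the err variable are placeholders.
 *
 *	struct ib_sge sg[2] = {
 *		{ .addr = buf0_dma, .length = 4096, .lkey = lkey0 },
 *		{ .addr = buf1_dma, .length = 4096, .lkey = lkey1 },
 *	};
 *	struct irdma_post_sq_info info = {};
 *	int err;
 *
 *	info.wr_id = 0x1234;
 *	info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
 *	info.signaled = true;
 *	info.op.rdma_write.lo_sg_list = sg;
 *	info.op.rdma_write.num_lo_sges = 2;
 *	info.op.rdma_write.rem_addr.addr = remote_va;
 *	info.op.rdma_write.rem_addr.lkey = remote_stag;
 *	err = irdma_uk_rdma_write(qp, &info, true);
 */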

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		       bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	int ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	addl_frag_cnt = op_info->num_lo_sges > 1 ?
			(op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE,
			 (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		  bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    frag_cnt ? op_info->sg_list : NULL,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting fragment
 * @op_info: info for setting bind wqe values
 */
static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
					struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: compatibility parameter
 */
static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
					 u32 num_sges, u8 polarity)
{
	u32 quanta_bytes_remaining = 16;
	int i;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				/* Remaining inline bytes reside after hdr */
				wqe += 16;
				quanta_bytes_remaining = 32;
			}
		}
	}
}

/**
 * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void irdma_set_mw_bind_wqe(__le64 *wqe,
				  struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: polarity of wqe valid bit
 */
static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
				   u32 num_sges, u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 quanta_bytes_remaining = 8;
	bool first_quanta = true;
	int i;

	wqe += 8;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				quanta_bytes_remaining = 31;

				/* Remaining inline bytes reside after hdr */
				if (first_quanta) {
					first_quanta = false;
					wqe += 16;
				} else {
					*wqe = inline_valid;
					wqe++;
				}
			}
		}
	}
	if (!first_quanta && quanta_bytes_remaining < 31)
		*(wqe + quanta_bytes_remaining) = inline_valid;
}
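
/*
 * Layout note (explanatory, derived from the copy loop above): on GEN_2
 * hardware the first quantum carries only 8 inline bytes (offsets 8-15;
 * offsets 16-31 hold the rest of the WQE header), while every following
 * 32-byte quantum carries 31 data bytes plus one trailing valid byte.
 * A quantum count of k therefore covers 8 + 31 * (k - 1) inline bytes,
 * which is exactly the threshold ladder used by
 * irdma_inline_data_size_to_quanta() below.
 */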

/**
 * irdma_inline_data_size_to_quanta - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size)
{
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}
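
/*
 * Equivalent closed form (illustrative): for data_size > 8 the ladder
 * above computes quanta = 2 + (data_size - 9) / 31, capped at 8, i.e.
 * one quantum for the first 8 bytes plus one per additional 31 bytes.
 * For example, data_size = 40 needs 3 quanta, since 39 is the most that
 * fits in 2.
 */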

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u32 i, total_size = 0;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.rdma_write;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
					op_info->num_lo_sges,
					qp->swqe_polarity);
	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
			 struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool read_fence = false;
	u32 i, total_size = 0;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.send;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

	read_fence |= info->read_fence;
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
					op_info->num_sges, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
				   struct irdma_post_sq_info *info,
				   bool post_sq)
{
	__le64 *wqe;
	struct irdma_inv_local_stag *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;
	struct ib_sge sge = {};

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	sge.lkey = op_info->target_stag;
	qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);

	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
				  post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
			  struct irdma_post_rq_info *info)
{
	u32 wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return -EINVAL;

	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return -ENOMEM;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
	qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
				    qp->rwqe_polarity);

	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					    qp->rwqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->rwqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16, 0);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_next;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);

	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	sw_cq_sel += cq_cnt;

	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);
	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_seq_num++;
	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next_se |= 1;
	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);

	dma_wmb(); /* make sure shadow area is updated before doorbell is rung */

	writel(cq->cq_id, cq->cqe_alloc_db);
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
			  struct irdma_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3;
	__le64 *cqe;
	struct irdma_qp_uk *qp;
	struct irdma_ring *pring = NULL;
	u32 wqe_idx;
	int ret_code;
	bool move_cq_head = true;
	u8 polarity;
	bool ext_valid;
	__le64 *ext_cqe;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
	if (polarity != cq->polarity)
		return -ENOENT;

	/* Ensure CQE contents are read after valid bit is checked */
	dma_rmb();

	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
	if (ext_valid) {
		u64 qword6, qword7;
		u32 peek_head;

		if (cq->avoid_mem_cflct) {
			ext_cqe = (__le64 *)((u8 *)cqe + 32);
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
		} else {
			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
			ext_cqe = cq->cq_base[peek_head].buf;
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
			if (!peek_head)
				polarity ^= 1;
		}
		if (polarity != cq->polarity)
			return -ENOENT;

		/* Ensure ext CQE contents are read after ext valid bit is checked */
		dma_rmb();

		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
		if (info->imm_valid) {
			u64 qword4;

			get_64bit_val(ext_cqe, 0, &qword4);
			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
		}
		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
		if (info->ud_smac_valid || info->ud_vlan_valid) {
			get_64bit_val(ext_cqe, 16, &qword6);
			if (info->ud_vlan_valid)
				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
			if (info->ud_smac_valid) {
				info->ud_smac[5] = qword6 & 0xFF;
				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
			}
		}
	} else {
		info->imm_valid = false;
		info->ud_smac_valid = false;
		info->ud_vlan_valid = false;
	}

	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
	if (info->error) {
		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
		if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
			/* Set the min error to standard flush error code for remaining cqes */
			if (info->minor_err != FLUSH_GENERAL_ERR) {
				qword3 &= ~IRDMA_CQ_MINERR;
				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
				set_64bit_val(cqe, 24, qword3);
			}
		} else {
			info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
		}
	} else {
		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

	get_64bit_val(cqe, 8, &comp_ctx);

	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
	qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
	if (!qp || qp->destroy_pending) {
		ret_code = -EFAULT;
		goto exit;
	}
	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
	info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
	info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);

	if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
		u32 array_idx;

		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
				ret_code = -ENOENT;
				goto exit;
			}

			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
		}

		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
		} else {
			info->stag_invalid_set = false;
		}
		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
			qp->rq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
				qp->rq_flush_complete = true;
			else
				move_cq_head = false;
		}
		pring = &qp->rq_ring;
	} else { /* q_type is IRDMA_CQE_QTYPE_SQ */
		if (qp->first_sq_wq) {
			if (wqe_idx + 1 >= qp->conn_wqes)
				qp->first_sq_wq = false;

			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, 0,
					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
				memset(info, 0,
				       sizeof(struct irdma_cq_poll_info));
				return irdma_uk_cq_poll_cmpl(cq, info);
			}
		}
		/* cease posting push mode on push drop */
		if (info->push_dropped) {
			qp->push_mode = false;
			qp->push_dropped = true;
		}
		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			if (!info->comp_status)
				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
			IRDMA_RING_SET_TAIL(qp->sq_ring,
					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
		} else {
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
				ret_code = -ENOENT;
				goto exit;
			}

			do {
				__le64 *sw_wqe;
				u64 wqe_qword;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, 24,
					      &wqe_qword);
				info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
							      wqe_qword);
				IRDMA_RING_SET_TAIL(qp->sq_ring,
						    tail + qp->sq_wrtrk_array[tail].quanta);
				if (info->op_type != IRDMAQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);
			if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
			    info->minor_err == FLUSH_PROT_ERR)
				info->minor_err = FLUSH_MW_BIND_ERR;
			qp->sq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
				qp->sq_flush_complete = true;
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
		if (pring && IRDMA_RING_MORE_WORK(*pring))
			move_cq_head = false;

	if (move_cq_head) {
		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
			cq->polarity ^= 1;

		if (ext_valid && !cq->avoid_mem_cflct) {
			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
				cq->polarity ^= 1;
		}

		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (!cq->avoid_mem_cflct && ext_valid)
			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, 0,
			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~IRDMA_CQ_WQEIDX;
		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
		set_64bit_val(cqe, 24, qword3);
	}

	return ret_code;
}

/**
 * irdma_qp_round_up - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int irdma_qp_round_up(u32 wqdepth)
{
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}
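
/*
 * Worked example (explanatory): the OR-smear above rounds up to the next
 * power of two. For wqdepth = 100, the decrement gives 99 = 0b1100011,
 * the shifts smear it to 0b1111111 = 127, and the final increment yields
 * 128. An input that is already a power of two is returned unchanged.
 */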

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on the number of SGEs
 * and inline data size. For 1 SGE or inline data <= 8, shift = 0 (wqe size
 * of 32 bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size
 * of 64 bytes). For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of
 * 128 bytes). Otherwise, shift = 3 (wqe size of 256 bytes).
 */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
	}
}
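
/*
 * Example (explanatory): on GEN_2 hardware a request with 5 SGEs and no
 * inline data falls into the "sge < 8" branch, so *shift = 2 and each SQ
 * WQE is 32 << 2 = 128 bytes (4 quanta). Note that irdma_uk_qp_init()
 * passes max_sq_frag_cnt + 1 here on GEN_2 to leave room for immediate
 * data.
 */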

/*
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
		      u32 *sqdepth)
{
	*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return -EINVAL;

	return 0;
}

/*
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
		      u32 *rqdepth)
{
	*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return -EINVAL;

	return 0;
}

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete connection
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
					struct irdma_qp_uk_init_info *info)
{
	u16 move_cnt = 1;

	if (!info->legacy_mode &&
	    (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
		move_cnt = 3;

	qp->conn_wqes = move_cnt;
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode.
 * The size of the wqe depends on the maximum number of fragments
 * allowed. The size of the wqe times the number of wqes should then
 * equal the amount of memory allocated for the sq and rq.
 */
int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
	int ret_code = 0;
	u32 sq_ring_size;
	u8 sqshift, rqshift;

	qp->uk_attrs = info->uk_attrs;
	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
		return -EINVAL;

	irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
				    info->max_inline_data, &sqshift);
		if (info->abi_ver > 4)
			rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	} else {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
				    info->max_inline_data, &sqshift);
	}
	qp->qp_caps = info->qp_caps;
	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;

	qp->rq_wrid_array = info->rq_wrid_array;
	qp->wqe_alloc_db = info->wqe_alloc_db;
	qp->qp_id = info->qp_id;
	qp->sq_size = info->sq_size;
	qp->push_mode = false;
	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << sqshift;
	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
	if (info->first_sq_wq) {
		irdma_setup_connection_wqes(qp, info);
		qp->swqe_polarity = 1;
		qp->first_sq_wq = true;
	} else {
		qp->swqe_polarity = 0;
	}
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;
	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->max_inline_data = info->max_inline_data;
	qp->rq_wqe_size = rqshift;
	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 1 << rqshift;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
	else
		qp->wqe_ops = iw_wqe_uk_ops;

	return ret_code;
}

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
void irdma_uk_cq_init(struct irdma_cq_uk *cq,
		      struct irdma_cq_uk_init_info *info)
{
	cq->cq_base = info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_db = info->cqe_alloc_db;
	cq->cq_ack_db = info->cq_ack_db;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
		else
			cqe = cq->cq_base[cq_head].buf;
		get_64bit_val(cqe, 24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		if (polarity != temp)
			break;

		/* Ensure CQE contents are read after valid bit is checked */
		dma_rmb();

		get_64bit_val(cqe, 8, &comp_ctx);
		if ((void *)(unsigned long)comp_ctx == q)
			set_64bit_val(cqe, 8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
}

/**
 * irdma_nop - post a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: signaled for completion
 * @post_sq: ring doorbell
 */
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;
	u32 wqe_idx;
	struct irdma_post_sq_info info = {};

	info.push_wqe = false;
	info.wr_id = wr_id;
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, &info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
		break;
	case 2:
	case 3:
		*quanta = 2;
		break;
	case 4:
	case 5:
		*quanta = 3;
		break;
	case 6:
	case 7:
		*quanta = 4;
		break;
	case 8:
	case 9:
		*quanta = 5;
		break;
	case 10:
	case 11:
		*quanta = 6;
		break;
	case 12:
	case 13:
		*quanta = 7;
		break;
	case 14:
	case 15: /* when immediate data is present */
		*quanta = 8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
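
/*
 * Equivalent closed form (illustrative): for frag_cnt <= 15 the switch
 * above computes *quanta = frag_cnt / 2 + 1. The first 32-byte quantum
 * holds one 16-byte fragment alongside the WQE header, and each
 * subsequent quantum holds two fragments.
 */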

/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	case 8:
	case 9:
	case 10:
	case 11:
	case 12:
	case 13:
	case 14:
		*wqe_size = 256;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}