puda.c

  1. // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
  2. /* Copyright (c) 2015 - 2021 Intel Corporation */
  3. #include "osdep.h"
  4. #include "hmc.h"
  5. #include "defs.h"
  6. #include "type.h"
  7. #include "protos.h"
  8. #include "puda.h"
  9. #include "ws.h"
  10. static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
  11. struct irdma_puda_buf *buf);
  12. static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid);
  13. static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
  14. struct irdma_puda_buf *buf, u32 wqe_idx);
  15. /**
  16. * irdma_puda_get_listbuf - get buffer from puda list
  17. * @list: list to use for buffers (ILQ or IEQ)
  18. */
  19. static struct irdma_puda_buf *irdma_puda_get_listbuf(struct list_head *list)
  20. {
  21. struct irdma_puda_buf *buf = NULL;
  22. if (!list_empty(list)) {
  23. buf = (struct irdma_puda_buf *)list->next;
  24. list_del((struct list_head *)&buf->list);
  25. }
  26. return buf;
  27. }
  28. /**
  29. * irdma_puda_get_bufpool - return buffer from resource
  30. * @rsrc: resource to use for buffer
  31. */
  32. struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)
  33. {
  34. struct irdma_puda_buf *buf = NULL;
  35. struct list_head *list = &rsrc->bufpool;
  36. unsigned long flags;
  37. spin_lock_irqsave(&rsrc->bufpool_lock, flags);
  38. buf = irdma_puda_get_listbuf(list);
  39. if (buf) {
  40. rsrc->avail_buf_count--;
  41. buf->vsi = rsrc->vsi;
  42. } else {
  43. rsrc->stats_buf_alloc_fail++;
  44. }
  45. spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
  46. return buf;
  47. }
  48. /**
  49. * irdma_puda_ret_bufpool - return buffer to rsrc list
  50. * @rsrc: resource to use for buffer
  51. * @buf: buffer to return to resource
  52. */
  53. void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
  54. struct irdma_puda_buf *buf)
  55. {
  56. unsigned long flags;
  57. buf->do_lpb = false;
  58. spin_lock_irqsave(&rsrc->bufpool_lock, flags);
  59. list_add(&buf->list, &rsrc->bufpool);
  60. spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
  61. rsrc->avail_buf_count++;
  62. }
  63. /**
  64. * irdma_puda_post_recvbuf - set wqe for rcv buffer
  65. * @rsrc: resource ptr
  66. * @wqe_idx: wqe index to use
  67. * @buf: puda buffer for rcv q
  68. * @initial: flag if during init time
  69. */
  70. static void irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
  71. struct irdma_puda_buf *buf, bool initial)
  72. {
  73. __le64 *wqe;
  74. struct irdma_sc_qp *qp = &rsrc->qp;
  75. u64 offset24 = 0;
  76. /* Sync buffer for use by device */
  77. dma_sync_single_for_device(rsrc->dev->hw->device, buf->mem.pa,
  78. buf->mem.size, DMA_BIDIRECTIONAL);
  79. qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
  80. wqe = qp->qp_uk.rq_base[wqe_idx].elem;
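/*
 * When the WQE is being reused (not the initial post), read back qword
 * 24 and flip the valid bit relative to its previous value; on the
 * initial post the valid bit is simply set.
 */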
  81. if (!initial)
  82. get_64bit_val(wqe, 24, &offset24);
  83. offset24 = (offset24) ? 0 : FIELD_PREP(IRDMAQPSQ_VALID, 1);
  84. set_64bit_val(wqe, 16, 0);
  85. set_64bit_val(wqe, 0, buf->mem.pa);
  86. if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
  87. set_64bit_val(wqe, 8,
  88. FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, buf->mem.size));
  89. } else {
  90. set_64bit_val(wqe, 8,
  91. FIELD_PREP(IRDMAQPSQ_FRAG_LEN, buf->mem.size) |
  92. offset24);
  93. }
  94. dma_wmb(); /* make sure WQE is written before valid bit is set */
  95. set_64bit_val(wqe, 24, offset24);
  96. }
  97. /**
  98. * irdma_puda_replenish_rq - post rcv buffers
  99. * @rsrc: resource to use for buffer
  100. * @initial: flag if during init time
  101. */
  102. static int irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
  103. {
  104. u32 i;
  105. u32 invalid_cnt = rsrc->rxq_invalid_cnt;
  106. struct irdma_puda_buf *buf = NULL;
  107. for (i = 0; i < invalid_cnt; i++) {
  108. buf = irdma_puda_get_bufpool(rsrc);
  109. if (!buf)
  110. return -ENOBUFS;
  111. irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
  112. rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
  113. rsrc->rxq_invalid_cnt--;
  114. }
  115. return 0;
  116. }
  117. /**
  118. * irdma_puda_alloc_buf - allocate mem for buffer
  119. * @dev: iwarp device
  120. * @len: length of buffer
  121. */
  122. static struct irdma_puda_buf *irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
  123. u32 len)
  124. {
  125. struct irdma_puda_buf *buf;
  126. struct irdma_virt_mem buf_mem;
  127. buf_mem.size = sizeof(struct irdma_puda_buf);
  128. buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
  129. if (!buf_mem.va)
  130. return NULL;
  131. buf = buf_mem.va;
  132. buf->mem.size = len;
  133. buf->mem.va = kzalloc(buf->mem.size, GFP_KERNEL);
  134. if (!buf->mem.va)
  135. goto free_virt;
  136. buf->mem.pa = dma_map_single(dev->hw->device, buf->mem.va,
  137. buf->mem.size, DMA_BIDIRECTIONAL);
  138. if (dma_mapping_error(dev->hw->device, buf->mem.pa)) {
  139. kfree(buf->mem.va);
  140. goto free_virt;
  141. }
  142. buf->buf_mem.va = buf_mem.va;
  143. buf->buf_mem.size = buf_mem.size;
  144. return buf;
  145. free_virt:
  146. kfree(buf_mem.va);
  147. return NULL;
  148. }
  149. /**
  150. * irdma_puda_dele_buf - free buffer back to the system
  151. * @dev: iwarp device
  152. * @buf: buffer to free
  153. */
  154. static void irdma_puda_dele_buf(struct irdma_sc_dev *dev,
  155. struct irdma_puda_buf *buf)
  156. {
  157. dma_unmap_single(dev->hw->device, buf->mem.pa, buf->mem.size,
  158. DMA_BIDIRECTIONAL);
  159. kfree(buf->mem.va);
  160. kfree(buf->buf_mem.va);
  161. }
  162. /**
  163. * irdma_puda_get_next_send_wqe - return next wqe for processing
  164. * @qp: puda qp for wqe
  165. * @wqe_idx: wqe index for caller
  166. */
  167. static __le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,
  168. u32 *wqe_idx)
  169. {
  170. int ret_code = 0;
  171. *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
  172. if (!*wqe_idx)
  173. qp->swqe_polarity = !qp->swqe_polarity;
  174. IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);
  175. if (ret_code)
  176. return NULL;
  177. return qp->sq_base[*wqe_idx].elem;
  178. }
  179. /**
  180. * irdma_puda_poll_info - poll cq for completion
  181. * @cq: cq for poll
  182. * @info: info return for successful completion
  183. */
  184. static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
  185. struct irdma_puda_cmpl_info *info)
  186. {
  187. struct irdma_cq_uk *cq_uk = &cq->cq_uk;
  188. u64 qword0, qword2, qword3, qword6;
  189. __le64 *cqe;
  190. __le64 *ext_cqe = NULL;
  191. u64 qword7 = 0;
  192. u64 comp_ctx;
  193. bool valid_bit;
  194. bool ext_valid = false;
  195. u32 major_err, minor_err;
  196. u32 peek_head;
  197. bool error;
  198. u8 polarity;
  199. cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
  200. get_64bit_val(cqe, 24, &qword3);
  201. valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3);
  202. if (valid_bit != cq_uk->polarity)
  203. return -ENOENT;
  204. /* Ensure CQE contents are read after valid bit is checked */
  205. dma_rmb();
  206. if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
  207. ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
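/*
 * On GEN_2 the extended CQE occupies the next slot in the CQ ring:
 * peek one entry ahead and account for the polarity flip when the
 * peek wraps back to index 0.
 */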
  208. if (ext_valid) {
  209. peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;
  210. ext_cqe = cq_uk->cq_base[peek_head].buf;
  211. get_64bit_val(ext_cqe, 24, &qword7);
  212. polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
  213. if (!peek_head)
  214. polarity ^= 1;
  215. if (polarity != cq_uk->polarity)
  216. return -ENOENT;
  217. /* Ensure ext CQE contents are read after ext valid bit is checked */
  218. dma_rmb();
  219. IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
  220. if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
  221. cq_uk->polarity = !cq_uk->polarity;
  222. /* update cq tail in cq shadow memory also */
  223. IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
  224. }
  225. print_hex_dump_debug("PUDA: PUDA CQE", DUMP_PREFIX_OFFSET, 16, 8, cqe,
  226. 32, false);
  227. if (ext_valid)
  228. print_hex_dump_debug("PUDA: PUDA EXT-CQE", DUMP_PREFIX_OFFSET,
  229. 16, 8, ext_cqe, 32, false);
  230. error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
  231. if (error) {
  232. ibdev_dbg(to_ibdev(cq->dev), "PUDA: receive error\n");
  233. major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3));
  234. minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3));
  235. info->compl_error = major_err << 16 | minor_err;
  236. return -EIO;
  237. }
  238. get_64bit_val(cqe, 0, &qword0);
  239. get_64bit_val(cqe, 16, &qword2);
  240. info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
  241. info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
  242. if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
  243. info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
  244. get_64bit_val(cqe, 8, &comp_ctx);
  245. info->qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
  246. info->wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
  247. if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
  248. if (ext_valid) {
  249. info->vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
  250. if (info->vlan_valid) {
  251. get_64bit_val(ext_cqe, 16, &qword6);
  252. info->vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
  253. }
  254. info->smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
  255. if (info->smac_valid) {
  256. get_64bit_val(ext_cqe, 16, &qword6);
  257. info->smac[0] = (u8)((qword6 >> 40) & 0xFF);
  258. info->smac[1] = (u8)((qword6 >> 32) & 0xFF);
  259. info->smac[2] = (u8)((qword6 >> 24) & 0xFF);
  260. info->smac[3] = (u8)((qword6 >> 16) & 0xFF);
  261. info->smac[4] = (u8)((qword6 >> 8) & 0xFF);
  262. info->smac[5] = (u8)(qword6 & 0xFF);
  263. }
  264. }
  265. if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
  266. info->vlan_valid = (bool)FIELD_GET(IRDMA_VLAN_TAG_VALID, qword3);
  267. info->l4proto = (u8)FIELD_GET(IRDMA_UDA_L4PROTO, qword2);
  268. info->l3proto = (u8)FIELD_GET(IRDMA_UDA_L3PROTO, qword2);
  269. }
  270. info->payload_len = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
  271. }
  272. return 0;
  273. }
  274. /**
  275. * irdma_puda_poll_cmpl - processes completion for cq
  276. * @dev: iwarp device
  277. * @cq: cq getting interrupt
  278. * @compl_err: return any completion err
  279. */
  280. int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
  281. u32 *compl_err)
  282. {
  283. struct irdma_qp_uk *qp;
  284. struct irdma_cq_uk *cq_uk = &cq->cq_uk;
  285. struct irdma_puda_cmpl_info info = {};
  286. int ret = 0;
  287. struct irdma_puda_buf *buf;
  288. struct irdma_puda_rsrc *rsrc;
  289. u8 cq_type = cq->cq_type;
  290. unsigned long flags;
  291. if (cq_type == IRDMA_CQ_TYPE_ILQ || cq_type == IRDMA_CQ_TYPE_IEQ) {
  292. rsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? cq->vsi->ilq :
  293. cq->vsi->ieq;
  294. } else {
  295. ibdev_dbg(to_ibdev(dev), "PUDA: qp_type error\n");
  296. return -EINVAL;
  297. }
  298. ret = irdma_puda_poll_info(cq, &info);
  299. *compl_err = info.compl_error;
  300. if (ret == -ENOENT)
  301. return ret;
  302. if (ret)
  303. goto done;
  304. qp = info.qp;
  305. if (!qp || !rsrc) {
  306. ret = -EFAULT;
  307. goto done;
  308. }
  309. if (qp->qp_id != rsrc->qp_id) {
  310. ret = -EFAULT;
  311. goto done;
  312. }
  313. if (info.q_type == IRDMA_CQE_QTYPE_RQ) {
  314. buf = (struct irdma_puda_buf *)(uintptr_t)
  315. qp->rq_wrid_array[info.wqe_idx];
  316. /* reusing so sync the buffer for CPU use */
  317. dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
  318. buf->mem.size, DMA_BIDIRECTIONAL);
  319. /* Get all the tcpip information in the buf header */
  320. ret = irdma_puda_get_tcpip_info(&info, buf);
  321. if (ret) {
  322. rsrc->stats_rcvd_pkt_err++;
  323. if (cq_type == IRDMA_CQ_TYPE_ILQ) {
  324. irdma_ilq_putback_rcvbuf(&rsrc->qp, buf,
  325. info.wqe_idx);
  326. } else {
  327. irdma_puda_ret_bufpool(rsrc, buf);
  328. irdma_puda_replenish_rq(rsrc, false);
  329. }
  330. goto done;
  331. }
  332. rsrc->stats_pkt_rcvd++;
  333. rsrc->compl_rxwqe_idx = info.wqe_idx;
  334. ibdev_dbg(to_ibdev(dev), "PUDA: RQ completion\n");
  335. rsrc->receive(rsrc->vsi, buf);
  336. if (cq_type == IRDMA_CQ_TYPE_ILQ)
  337. irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx);
  338. else
  339. irdma_puda_replenish_rq(rsrc, false);
  340. } else {
  341. ibdev_dbg(to_ibdev(dev), "PUDA: SQ completion\n");
  342. buf = (struct irdma_puda_buf *)(uintptr_t)
  343. qp->sq_wrtrk_array[info.wqe_idx].wrid;
  344. /* reusing so sync the buffer for CPU use */
  345. dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
  346. buf->mem.size, DMA_BIDIRECTIONAL);
  347. IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
  348. rsrc->xmit_complete(rsrc->vsi, buf);
  349. spin_lock_irqsave(&rsrc->bufpool_lock, flags);
  350. rsrc->tx_wqe_avail_cnt++;
  351. spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
  352. if (!list_empty(&rsrc->txpend))
  353. irdma_puda_send_buf(rsrc, NULL);
  354. }
  355. done:
  356. IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
  357. if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
  358. cq_uk->polarity = !cq_uk->polarity;
  359. /* update cq tail in cq shadow memory also */
  360. IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
  361. set_64bit_val(cq_uk->shadow_area, 0,
  362. IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring));
  363. return ret;
  364. }
  365. /**
  366. * irdma_puda_send - complete send wqe for transmit
  367. * @qp: puda qp for send
  368. * @info: buffer information for transmit
  369. */
  370. int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info)
  371. {
  372. __le64 *wqe;
  373. u32 iplen, l4len;
  374. u64 hdr[2];
  375. u32 wqe_idx;
  376. u8 iipt;
  377. /* number of 32 bits DWORDS in header */
  378. l4len = info->tcplen >> 2;
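/* iplen is also in 32-bit words: 5 for a base IPv4 header (20 bytes),
 * 10 for the fixed IPv6 header (40 bytes)
 */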
  379. if (info->ipv4) {
  380. iipt = 3;
  381. iplen = 5;
  382. } else {
  383. iipt = 1;
  384. iplen = 10;
  385. }
  386. wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
  387. if (!wqe)
  388. return -ENOMEM;
  389. qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
  390. /* Third line of WQE descriptor */
  391. /* maclen is in words */
  392. if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
  393. hdr[0] = 0; /* Dest_QPN and Dest_QKey only for UD */
  394. hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
  395. FIELD_PREP(IRDMA_UDA_QPSQ_L4LEN, l4len) |
  396. FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) |
  397. FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
  398. FIELD_PREP(IRDMA_UDA_QPSQ_VALID,
  399. qp->qp_uk.swqe_polarity);
  400. /* Fourth line of WQE descriptor */
  401. set_64bit_val(wqe, 0, info->paddr);
  402. set_64bit_val(wqe, 8,
  403. FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) |
  404. FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity));
  405. } else {
  406. hdr[0] = FIELD_PREP(IRDMA_UDA_QPSQ_MACLEN, info->maclen >> 1) |
  407. FIELD_PREP(IRDMA_UDA_QPSQ_IPLEN, iplen) |
  408. FIELD_PREP(IRDMA_UDA_QPSQ_L4T, 1) |
  409. FIELD_PREP(IRDMA_UDA_QPSQ_IIPT, iipt) |
  410. FIELD_PREP(IRDMA_GEN1_UDA_QPSQ_L4LEN, l4len);
  411. hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
  412. FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
  413. FIELD_PREP(IRDMA_UDA_QPSQ_DOLOOPBACK, info->do_lpb) |
  414. FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity);
  415. /* Fourth line of WQE descriptor */
  416. set_64bit_val(wqe, 0, info->paddr);
  417. set_64bit_val(wqe, 8,
  418. FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len));
  419. }
  420. set_64bit_val(wqe, 16, hdr[0]);
  421. dma_wmb(); /* make sure WQE is written before valid bit is set */
  422. set_64bit_val(wqe, 24, hdr[1]);
  423. print_hex_dump_debug("PUDA: PUDA SEND WQE", DUMP_PREFIX_OFFSET, 16, 8,
  424. wqe, 32, false);
  425. irdma_uk_qp_post_wr(&qp->qp_uk);
  426. return 0;
  427. }
  428. /**
  429. * irdma_puda_send_buf - transmit puda buffer
  430. * @rsrc: resource to use for buffer
  431. * @buf: puda buffer to transmit
  432. */
  433. void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
  434. struct irdma_puda_buf *buf)
  435. {
  436. struct irdma_puda_send_info info;
  437. int ret = 0;
  438. unsigned long flags;
  439. spin_lock_irqsave(&rsrc->bufpool_lock, flags);
  440. /* if no wqe available or not from a completion and we have
  441. * pending buffers, we must queue new buffer
  442. */
  443. if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
  444. list_add_tail(&buf->list, &rsrc->txpend);
  445. spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
  446. rsrc->stats_sent_pkt_q++;
  447. if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
  448. ibdev_dbg(to_ibdev(rsrc->dev),
  449. "PUDA: adding to txpend\n");
  450. return;
  451. }
  452. rsrc->tx_wqe_avail_cnt--;
  453. /* if we are coming from a completion and have pending buffers
  454. * then get one from the pending list
  455. */
  456. if (!buf) {
  457. buf = irdma_puda_get_listbuf(&rsrc->txpend);
  458. if (!buf)
  459. goto done;
  460. }
  461. info.scratch = buf;
  462. info.paddr = buf->mem.pa;
  463. info.len = buf->totallen;
  464. info.tcplen = buf->tcphlen;
  465. info.ipv4 = buf->ipv4;
  466. if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
  467. info.ah_id = buf->ah_id;
  468. } else {
  469. info.maclen = buf->maclen;
  470. info.do_lpb = buf->do_lpb;
  471. }
  472. /* Sync buffer for use by device */
  473. dma_sync_single_for_cpu(rsrc->dev->hw->device, buf->mem.pa,
  474. buf->mem.size, DMA_BIDIRECTIONAL);
  475. ret = irdma_puda_send(&rsrc->qp, &info);
  476. if (ret) {
  477. rsrc->tx_wqe_avail_cnt++;
  478. rsrc->stats_sent_pkt_q++;
  479. list_add(&buf->list, &rsrc->txpend);
  480. if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
  481. ibdev_dbg(to_ibdev(rsrc->dev),
  482. "PUDA: adding to puda_send\n");
  483. } else {
  484. rsrc->stats_pkt_sent++;
  485. }
  486. done:
  487. spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
  488. }
  489. /**
  490. * irdma_puda_qp_setctx - during init, set qp's context
  491. * @rsrc: qp's resource
  492. */
  493. static void irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
  494. {
  495. struct irdma_sc_qp *qp = &rsrc->qp;
  496. __le64 *qp_ctx = qp->hw_host_ctx;
  497. set_64bit_val(qp_ctx, 8, qp->sq_pa);
  498. set_64bit_val(qp_ctx, 16, qp->rq_pa);
  499. set_64bit_val(qp_ctx, 24,
  500. FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
  501. FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size));
  502. set_64bit_val(qp_ctx, 48,
  503. FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size));
  504. set_64bit_val(qp_ctx, 56, 0);
  505. if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
  506. set_64bit_val(qp_ctx, 64, 1);
  507. set_64bit_val(qp_ctx, 136,
  508. FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) |
  509. FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id));
  510. set_64bit_val(qp_ctx, 144,
  511. FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx));
  512. set_64bit_val(qp_ctx, 160,
  513. FIELD_PREP(IRDMAQPC_PRIVEN, 1) |
  514. FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid));
  515. set_64bit_val(qp_ctx, 168,
  516. FIELD_PREP(IRDMAQPC_QPCOMPCTX, (uintptr_t)qp));
  517. set_64bit_val(qp_ctx, 176,
  518. FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
  519. FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
  520. FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
  521. print_hex_dump_debug("PUDA: PUDA QP CONTEXT", DUMP_PREFIX_OFFSET, 16,
  522. 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
  523. }
  524. /**
  525. * irdma_puda_qp_wqe - setup wqe for qp create
  526. * @dev: Device
  527. * @qp: Resource qp
  528. */
  529. static int irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
  530. {
  531. struct irdma_sc_cqp *cqp;
  532. __le64 *wqe;
  533. u64 hdr;
  534. struct irdma_ccq_cqe_info compl_info;
  535. int status = 0;
  536. cqp = dev->cqp;
  537. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
  538. if (!wqe)
  539. return -ENOMEM;
  540. set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
  541. set_64bit_val(wqe, 40, qp->shadow_area_pa);
  542. hdr = qp->qp_uk.qp_id |
  543. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
  544. FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, IRDMA_QP_TYPE_UDA) |
  545. FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, 1) |
  546. FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, 2) |
  547. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  548. dma_wmb(); /* make sure WQE is written before valid bit is set */
  549. set_64bit_val(wqe, 24, hdr);
  550. print_hex_dump_debug("PUDA: PUDA QP CREATE", DUMP_PREFIX_OFFSET, 16,
  551. 8, wqe, 40, false);
  552. irdma_sc_cqp_post_sq(cqp);
  553. status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP,
  554. &compl_info);
  555. return status;
  556. }
  557. /**
  558. * irdma_puda_qp_create - create qp for resource
  559. * @rsrc: resource to use for buffer
  560. */
  561. static int irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
  562. {
  563. struct irdma_sc_qp *qp = &rsrc->qp;
  564. struct irdma_qp_uk *ukqp = &qp->qp_uk;
  565. int ret = 0;
  566. u32 sq_size, rq_size;
  567. struct irdma_dma_mem *mem;
  568. sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;
  569. rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;
  570. rsrc->qpmem.size = ALIGN((sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) + IRDMA_QP_CTX_SIZE),
  571. IRDMA_HW_PAGE_SIZE);
  572. rsrc->qpmem.va = dma_alloc_coherent(rsrc->dev->hw->device,
  573. rsrc->qpmem.size, &rsrc->qpmem.pa,
  574. GFP_KERNEL);
  575. if (!rsrc->qpmem.va)
  576. return -ENOMEM;
  577. mem = &rsrc->qpmem;
  578. memset(mem->va, 0, rsrc->qpmem.size);
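/*
 * The single coherent allocation is carved up in order: SQ WQEs,
 * RQ WQEs, shadow area, then the QP host context.
 */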
  579. qp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
  580. qp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
  581. qp->pd = &rsrc->sc_pd;
  582. qp->qp_uk.qp_type = IRDMA_QP_TYPE_UDA;
  583. qp->dev = rsrc->dev;
  584. qp->qp_uk.back_qp = rsrc;
  585. qp->sq_pa = mem->pa;
  586. qp->rq_pa = qp->sq_pa + sq_size;
  587. qp->vsi = rsrc->vsi;
  588. ukqp->sq_base = mem->va;
  589. ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
  590. ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
  591. ukqp->uk_attrs = &qp->dev->hw_attrs.uk_attrs;
  592. qp->shadow_area_pa = qp->rq_pa + rq_size;
  593. qp->hw_host_ctx = ukqp->shadow_area + IRDMA_SHADOW_AREA_SIZE;
  594. qp->hw_host_ctx_pa = qp->shadow_area_pa + (IRDMA_SHADOW_AREA_SIZE << 3);
  595. qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
  596. ukqp->qp_id = rsrc->qp_id;
  597. ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
  598. ukqp->rq_wrid_array = rsrc->rq_wrid_array;
  599. ukqp->sq_size = rsrc->sq_size;
  600. ukqp->rq_size = rsrc->rq_size;
  601. IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
  602. IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
  603. IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
  604. ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
  605. ret = rsrc->dev->ws_add(qp->vsi, qp->user_pri);
  606. if (ret) {
  607. dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
  608. rsrc->qpmem.va, rsrc->qpmem.pa);
  609. rsrc->qpmem.va = NULL;
  610. return ret;
  611. }
  612. irdma_qp_add_qos(qp);
  613. irdma_puda_qp_setctx(rsrc);
  614. if (rsrc->dev->ceq_valid)
  615. ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);
  616. else
  617. ret = irdma_puda_qp_wqe(rsrc->dev, qp);
  618. if (ret) {
  619. irdma_qp_rem_qos(qp);
  620. rsrc->dev->ws_remove(qp->vsi, qp->user_pri);
  621. dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
  622. rsrc->qpmem.va, rsrc->qpmem.pa);
  623. rsrc->qpmem.va = NULL;
  624. }
  625. return ret;
  626. }
  627. /**
  628. * irdma_puda_cq_wqe - setup wqe for CQ create
  629. * @dev: Device
  630. * @cq: resource for cq
  631. */
  632. static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
  633. {
  634. __le64 *wqe;
  635. struct irdma_sc_cqp *cqp;
  636. u64 hdr;
  637. struct irdma_ccq_cqe_info compl_info;
  638. int status = 0;
  639. cqp = dev->cqp;
  640. wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
  641. if (!wqe)
  642. return -ENOMEM;
  643. set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
  644. set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
  645. set_64bit_val(wqe, 16,
  646. FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
  647. set_64bit_val(wqe, 32, cq->cq_pa);
  648. set_64bit_val(wqe, 40, cq->shadow_area_pa);
  649. set_64bit_val(wqe, 56,
  650. FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
  651. FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
  652. hdr = cq->cq_uk.cq_id |
  653. FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
  654. FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, 1) |
  655. FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, 1) |
  656. FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, 1) |
  657. FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
  658. dma_wmb(); /* make sure WQE is written before valid bit is set */
  659. set_64bit_val(wqe, 24, hdr);
  660. print_hex_dump_debug("PUDA: PUDA CREATE CQ", DUMP_PREFIX_OFFSET, 16,
  661. 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  662. irdma_sc_cqp_post_sq(dev->cqp);
  663. status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
  664. &compl_info);
  665. if (!status) {
  666. struct irdma_sc_ceq *ceq = dev->ceq[0];
  667. if (ceq && ceq->reg_cq)
  668. status = irdma_sc_add_cq_ctx(ceq, cq);
  669. }
  670. return status;
  671. }
  672. /**
  673. * irdma_puda_cq_create - create cq for resource
  674. * @rsrc: resource for which to create the cq
  675. */
  676. static int irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
  677. {
  678. struct irdma_sc_dev *dev = rsrc->dev;
  679. struct irdma_sc_cq *cq = &rsrc->cq;
  680. int ret = 0;
  681. u32 cqsize;
  682. struct irdma_dma_mem *mem;
  683. struct irdma_cq_init_info info = {};
  684. struct irdma_cq_uk_init_info *init_info = &info.cq_uk_init_info;
  685. cq->vsi = rsrc->vsi;
  686. cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));
  687. rsrc->cqmem.size = ALIGN(cqsize + sizeof(struct irdma_cq_shadow_area),
  688. IRDMA_CQ0_ALIGNMENT);
  689. rsrc->cqmem.va = dma_alloc_coherent(dev->hw->device, rsrc->cqmem.size,
  690. &rsrc->cqmem.pa, GFP_KERNEL);
  691. if (!rsrc->cqmem.va)
  692. return -ENOMEM;
  693. mem = &rsrc->cqmem;
  694. info.dev = dev;
  695. info.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ?
  696. IRDMA_CQ_TYPE_ILQ : IRDMA_CQ_TYPE_IEQ;
  697. info.shadow_read_threshold = rsrc->cq_size >> 2;
  698. info.cq_base_pa = mem->pa;
  699. info.shadow_area_pa = mem->pa + cqsize;
  700. init_info->cq_base = mem->va;
  701. init_info->shadow_area = (__le64 *)((u8 *)mem->va + cqsize);
  702. init_info->cq_size = rsrc->cq_size;
  703. init_info->cq_id = rsrc->cq_id;
  704. info.ceqe_mask = true;
  705. info.ceq_id_valid = true;
  706. info.vsi = rsrc->vsi;
  707. ret = irdma_sc_cq_init(cq, &info);
  708. if (ret)
  709. goto error;
  710. if (rsrc->dev->ceq_valid)
  711. ret = irdma_cqp_cq_create_cmd(dev, cq);
  712. else
  713. ret = irdma_puda_cq_wqe(dev, cq);
  714. error:
  715. if (ret) {
  716. dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
  717. rsrc->cqmem.va, rsrc->cqmem.pa);
  718. rsrc->cqmem.va = NULL;
  719. }
  720. return ret;
  721. }
  722. /**
  723. * irdma_puda_free_qp - free qp for resource
  724. * @rsrc: resource whose qp is freed
  725. */
  726. static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
  727. {
  728. int ret;
  729. struct irdma_ccq_cqe_info compl_info;
  730. struct irdma_sc_dev *dev = rsrc->dev;
  731. if (rsrc->dev->ceq_valid) {
  732. irdma_cqp_qp_destroy_cmd(dev, &rsrc->qp);
  733. rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
  734. return;
  735. }
  736. ret = irdma_sc_qp_destroy(&rsrc->qp, 0, false, true, true);
  737. if (ret)
  738. ibdev_dbg(to_ibdev(dev),
  739. "PUDA: error puda qp destroy wqe, status = %d\n",
  740. ret);
  741. if (!ret) {
  742. ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_QP,
  743. &compl_info);
  744. if (ret)
  745. ibdev_dbg(to_ibdev(dev),
  746. "PUDA: error puda qp destroy failed, status = %d\n",
  747. ret);
  748. }
  749. rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
  750. }
  751. /**
  752. * irdma_puda_free_cq - free cq for resource
  753. * @rsrc: resource whose cq is freed
  754. */
  755. static void irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)
  756. {
  757. int ret;
  758. struct irdma_ccq_cqe_info compl_info;
  759. struct irdma_sc_dev *dev = rsrc->dev;
  760. if (rsrc->dev->ceq_valid) {
  761. irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);
  762. return;
  763. }
  764. ret = irdma_sc_cq_destroy(&rsrc->cq, 0, true);
  765. if (ret)
  766. ibdev_dbg(to_ibdev(dev), "PUDA: error ieq cq destroy\n");
  767. if (!ret) {
  768. ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_CQ,
  769. &compl_info);
  770. if (ret)
  771. ibdev_dbg(to_ibdev(dev),
  772. "PUDA: error ieq qp destroy done\n");
  773. }
  774. }
  775. /**
  776. * irdma_puda_dele_rsrc - delete all resources during close
  777. * @vsi: VSI structure of device
  778. * @type: type of resource to delete
  779. * @reset: true if reset chip
  780. */
  781. void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
  782. bool reset)
  783. {
  784. struct irdma_sc_dev *dev = vsi->dev;
  785. struct irdma_puda_rsrc *rsrc;
  786. struct irdma_puda_buf *buf = NULL;
  787. struct irdma_puda_buf *nextbuf = NULL;
  788. struct irdma_virt_mem *vmem;
  789. struct irdma_sc_ceq *ceq;
  790. ceq = vsi->dev->ceq[0];
  791. switch (type) {
  792. case IRDMA_PUDA_RSRC_TYPE_ILQ:
  793. rsrc = vsi->ilq;
  794. vmem = &vsi->ilq_mem;
  795. vsi->ilq = NULL;
  796. if (ceq && ceq->reg_cq)
  797. irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
  798. break;
  799. case IRDMA_PUDA_RSRC_TYPE_IEQ:
  800. rsrc = vsi->ieq;
  801. vmem = &vsi->ieq_mem;
  802. vsi->ieq = NULL;
  803. if (ceq && ceq->reg_cq)
  804. irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
  805. break;
  806. default:
  807. ibdev_dbg(to_ibdev(dev), "PUDA: error resource type = 0x%x\n",
  808. type);
  809. return;
  810. }
  811. switch (rsrc->cmpl) {
  812. case PUDA_HASH_CRC_COMPLETE:
  813. irdma_free_hash_desc(rsrc->hash_desc);
  814. fallthrough;
  815. case PUDA_QP_CREATED:
  816. irdma_qp_rem_qos(&rsrc->qp);
  817. if (!reset)
  818. irdma_puda_free_qp(rsrc);
  819. dma_free_coherent(dev->hw->device, rsrc->qpmem.size,
  820. rsrc->qpmem.va, rsrc->qpmem.pa);
  821. rsrc->qpmem.va = NULL;
  822. fallthrough;
  823. case PUDA_CQ_CREATED:
  824. if (!reset)
  825. irdma_puda_free_cq(rsrc);
  826. dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
  827. rsrc->cqmem.va, rsrc->cqmem.pa);
  828. rsrc->cqmem.va = NULL;
  829. break;
  830. default:
  831. ibdev_dbg(to_ibdev(rsrc->dev), "PUDA: error no resources\n");
  832. break;
  833. }
  834. /* Free all allocated puda buffers for both tx and rx */
  835. buf = rsrc->alloclist;
  836. while (buf) {
  837. nextbuf = buf->next;
  838. irdma_puda_dele_buf(dev, buf);
  839. buf = nextbuf;
  840. rsrc->alloc_buf_count--;
  841. }
  842. kfree(vmem->va);
  843. }
  844. /**
  845. * irdma_puda_allocbufs - allocate buffers for resource
  846. * @rsrc: resource for buffer allocation
  847. * @count: number of buffers to create
  848. */
  849. static int irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
  850. {
  851. u32 i;
  852. struct irdma_puda_buf *buf;
  853. struct irdma_puda_buf *nextbuf;
  854. for (i = 0; i < count; i++) {
  855. buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
  856. if (!buf) {
  857. rsrc->stats_buf_alloc_fail++;
  858. return -ENOMEM;
  859. }
  860. irdma_puda_ret_bufpool(rsrc, buf);
  861. rsrc->alloc_buf_count++;
  862. if (!rsrc->alloclist) {
  863. rsrc->alloclist = buf;
  864. } else {
  865. nextbuf = rsrc->alloclist;
  866. rsrc->alloclist = buf;
  867. buf->next = nextbuf;
  868. }
  869. }
  870. rsrc->avail_buf_count = rsrc->alloc_buf_count;
  871. return 0;
  872. }
  873. /**
  874. * irdma_puda_create_rsrc - create resource (ilq or ieq)
  875. * @vsi: sc VSI struct
  876. * @info: resource information
  877. */
  878. int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
  879. struct irdma_puda_rsrc_info *info)
  880. {
  881. struct irdma_sc_dev *dev = vsi->dev;
  882. int ret = 0;
  883. struct irdma_puda_rsrc *rsrc;
  884. u32 pudasize;
  885. u32 sqwridsize, rqwridsize;
  886. struct irdma_virt_mem *vmem;
  887. info->count = 1;
  888. pudasize = sizeof(struct irdma_puda_rsrc);
  889. sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);
  890. rqwridsize = info->rq_size * 8;
  891. switch (info->type) {
  892. case IRDMA_PUDA_RSRC_TYPE_ILQ:
  893. vmem = &vsi->ilq_mem;
  894. break;
  895. case IRDMA_PUDA_RSRC_TYPE_IEQ:
  896. vmem = &vsi->ieq_mem;
  897. break;
  898. default:
  899. return -EOPNOTSUPP;
  900. }
  901. vmem->size = pudasize + sqwridsize + rqwridsize;
  902. vmem->va = kzalloc(vmem->size, GFP_KERNEL);
  903. if (!vmem->va)
  904. return -ENOMEM;
  905. rsrc = vmem->va;
  906. spin_lock_init(&rsrc->bufpool_lock);
  907. switch (info->type) {
  908. case IRDMA_PUDA_RSRC_TYPE_ILQ:
  909. vsi->ilq = vmem->va;
  910. vsi->ilq_count = info->count;
  911. rsrc->receive = info->receive;
  912. rsrc->xmit_complete = info->xmit_complete;
  913. break;
  914. case IRDMA_PUDA_RSRC_TYPE_IEQ:
  915. vsi->ieq_count = info->count;
  916. vsi->ieq = vmem->va;
  917. rsrc->receive = irdma_ieq_receive;
  918. rsrc->xmit_complete = irdma_ieq_tx_compl;
  919. break;
  920. default:
  921. return -EOPNOTSUPP;
  922. }
  923. rsrc->type = info->type;
  924. rsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)
  925. ((u8 *)vmem->va + pudasize);
  926. rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
  927. /* Initialize all ieq lists */
  928. INIT_LIST_HEAD(&rsrc->bufpool);
  929. INIT_LIST_HEAD(&rsrc->txpend);
  930. rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
  931. irdma_sc_pd_init(dev, &rsrc->sc_pd, info->pd_id, info->abi_ver);
  932. rsrc->qp_id = info->qp_id;
  933. rsrc->cq_id = info->cq_id;
  934. rsrc->sq_size = info->sq_size;
  935. rsrc->rq_size = info->rq_size;
  936. rsrc->cq_size = info->rq_size + info->sq_size;
  937. if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
  938. if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
  939. rsrc->cq_size += info->rq_size;
  940. }
  941. rsrc->buf_size = info->buf_size;
  942. rsrc->dev = dev;
  943. rsrc->vsi = vsi;
  944. rsrc->stats_idx = info->stats_idx;
  945. rsrc->stats_idx_valid = info->stats_idx_valid;
  946. ret = irdma_puda_cq_create(rsrc);
  947. if (!ret) {
  948. rsrc->cmpl = PUDA_CQ_CREATED;
  949. ret = irdma_puda_qp_create(rsrc);
  950. }
  951. if (ret) {
  952. ibdev_dbg(to_ibdev(dev),
  953. "PUDA: error qp_create type=%d, status=%d\n",
  954. rsrc->type, ret);
  955. goto error;
  956. }
  957. rsrc->cmpl = PUDA_QP_CREATED;
  958. ret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
  959. if (ret) {
  960. ibdev_dbg(to_ibdev(dev), "PUDA: error alloc_buf\n");
  961. goto error;
  962. }
  963. rsrc->rxq_invalid_cnt = info->rq_size;
  964. ret = irdma_puda_replenish_rq(rsrc, true);
  965. if (ret)
  966. goto error;
  967. if (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) {
  968. if (!irdma_init_hash_desc(&rsrc->hash_desc)) {
  969. rsrc->check_crc = true;
  970. rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
  971. ret = 0;
  972. }
  973. }
  974. irdma_sc_ccq_arm(&rsrc->cq);
  975. return ret;
  976. error:
  977. irdma_puda_dele_rsrc(vsi, info->type, false);
  978. return ret;
  979. }
  980. /**
  981. * irdma_ilq_putback_rcvbuf - put an ilq buffer back on the rq
  982. * @qp: ilq's qp resource
  983. * @buf: puda buffer for rcv q
  984. * @wqe_idx: wqe index of completed rcvbuf
  985. */
  986. static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
  987. struct irdma_puda_buf *buf, u32 wqe_idx)
  988. {
  989. __le64 *wqe;
  990. u64 offset8, offset24;
  991. /* Sync buffer for use by device */
  992. dma_sync_single_for_device(qp->dev->hw->device, buf->mem.pa,
  993. buf->mem.size, DMA_BIDIRECTIONAL);
  994. wqe = qp->qp_uk.rq_base[wqe_idx].elem;
  995. get_64bit_val(wqe, 24, &offset24);
  996. if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
  997. get_64bit_val(wqe, 8, &offset8);
  998. if (offset24)
  999. offset8 &= ~FIELD_PREP(IRDMAQPSQ_VALID, 1);
  1000. else
  1001. offset8 |= FIELD_PREP(IRDMAQPSQ_VALID, 1);
  1002. set_64bit_val(wqe, 8, offset8);
  1003. dma_wmb(); /* make sure WQE is written before valid bit is set */
  1004. }
  1005. if (offset24)
  1006. offset24 = 0;
  1007. else
  1008. offset24 = FIELD_PREP(IRDMAQPSQ_VALID, 1);
  1009. set_64bit_val(wqe, 24, offset24);
  1010. }
  1011. /**
  1012. * irdma_ieq_get_fpdu_len - get length of fpdu with or without marker
  1013. * @pfpdu: pointer to fpdu
  1014. * @datap: pointer to data in the buffer
  1015. * @rcv_seq: seqnum of the data buffer
  1016. */
  1017. static u16 irdma_ieq_get_fpdu_len(struct irdma_pfpdu *pfpdu, u8 *datap,
  1018. u32 rcv_seq)
  1019. {
  1020. u32 marker_seq, end_seq, blk_start;
  1021. u8 marker_len = pfpdu->marker_len;
  1022. u16 total_len = 0;
  1023. u16 fpdu_len;
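/*
 * MPA markers (when enabled) appear every IRDMA_MRK_BLK_SZ bytes of
 * the stream: find the next marker position relative to rcv_seq and
 * count marker_len for every marker that lands inside this fpdu.
 */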
  1024. blk_start = (pfpdu->rcv_start_seq - rcv_seq) & (IRDMA_MRK_BLK_SZ - 1);
  1025. if (!blk_start) {
  1026. total_len = marker_len;
  1027. marker_seq = rcv_seq + IRDMA_MRK_BLK_SZ;
  1028. if (marker_len && *(u32 *)datap)
  1029. return 0;
  1030. } else {
  1031. marker_seq = rcv_seq + blk_start;
  1032. }
  1033. datap += total_len;
  1034. fpdu_len = ntohs(*(__be16 *)datap);
  1035. fpdu_len += IRDMA_IEQ_MPA_FRAMING;
  1036. fpdu_len = (fpdu_len + 3) & 0xfffc;
  1037. if (fpdu_len > pfpdu->max_fpdu_data)
  1038. return 0;
  1039. total_len += fpdu_len;
  1040. end_seq = rcv_seq + total_len;
  1041. while ((int)(marker_seq - end_seq) < 0) {
  1042. total_len += marker_len;
  1043. end_seq += marker_len;
  1044. marker_seq += IRDMA_MRK_BLK_SZ;
  1045. }
  1046. return total_len;
  1047. }
  1048. /**
  1049. * irdma_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
  1050. * @buf: rcv buffer with partial fpdu
  1051. * @txbuf: tx buffer for sending back
  1052. * @buf_offset: rcv buffer offset to copy from
  1053. * @txbuf_offset: at offset in tx buf to copy
  1054. * @len: length of data to copy
  1055. */
  1056. static void irdma_ieq_copy_to_txbuf(struct irdma_puda_buf *buf,
  1057. struct irdma_puda_buf *txbuf,
  1058. u16 buf_offset, u32 txbuf_offset, u32 len)
  1059. {
  1060. void *mem1 = (u8 *)buf->mem.va + buf_offset;
  1061. void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
  1062. memcpy(mem2, mem1, len);
  1063. }
  1064. /**
  1065. * irdma_ieq_setup_tx_buf - setup tx buffer for partial handling
  1066. * @buf: receive buffer with partial fpdu
  1067. * @txbuf: buffer to prepare
  1068. */
  1069. static void irdma_ieq_setup_tx_buf(struct irdma_puda_buf *buf,
  1070. struct irdma_puda_buf *txbuf)
  1071. {
  1072. txbuf->tcphlen = buf->tcphlen;
  1073. txbuf->ipv4 = buf->ipv4;
  1074. if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
  1075. txbuf->hdrlen = txbuf->tcphlen;
  1076. irdma_ieq_copy_to_txbuf(buf, txbuf, IRDMA_TCP_OFFSET, 0,
  1077. txbuf->hdrlen);
  1078. } else {
  1079. txbuf->maclen = buf->maclen;
  1080. txbuf->hdrlen = buf->hdrlen;
  1081. irdma_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
  1082. }
  1083. }
  1084. /**
  1085. * irdma_ieq_check_first_buf - check if rcv buffer's seq is in range
  1086. * @buf: receive exception buffer
  1087. * @fps: first partial sequence number
  1088. */
  1089. static void irdma_ieq_check_first_buf(struct irdma_puda_buf *buf, u32 fps)
  1090. {
  1091. u32 offset;
  1092. if (buf->seqnum < fps) {
  1093. offset = fps - buf->seqnum;
  1094. if (offset > buf->datalen)
  1095. return;
  1096. buf->data += offset;
  1097. buf->datalen -= (u16)offset;
  1098. buf->seqnum = fps;
  1099. }
  1100. }
  1101. /**
  1102. * irdma_ieq_compl_pfpdu - write txbuf with full fpdu
  1103. * @ieq: ieq resource
  1104. * @rxlist: ieq's received buffer list
  1105. * @pbufl: temporary list for buffers for fpdu
  1106. * @txbuf: tx buffer for fpdu
  1107. * @fpdu_len: total length of fpdu
  1108. */
  1109. static void irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,
  1110. struct list_head *rxlist,
  1111. struct list_head *pbufl,
  1112. struct irdma_puda_buf *txbuf, u16 fpdu_len)
  1113. {
  1114. struct irdma_puda_buf *buf;
  1115. u32 nextseqnum;
  1116. u16 txoffset, bufoffset;
  1117. buf = irdma_puda_get_listbuf(pbufl);
  1118. if (!buf)
  1119. return;
  1120. nextseqnum = buf->seqnum + fpdu_len;
  1121. irdma_ieq_setup_tx_buf(buf, txbuf);
  1122. if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
  1123. txoffset = txbuf->hdrlen;
  1124. txbuf->totallen = txbuf->hdrlen + fpdu_len;
  1125. txbuf->data = (u8 *)txbuf->mem.va + txoffset;
  1126. } else {
  1127. txoffset = buf->hdrlen;
  1128. txbuf->totallen = buf->hdrlen + fpdu_len;
  1129. txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
  1130. }
  1131. bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
  1132. do {
  1133. if (buf->datalen >= fpdu_len) {
  1134. /* copied full fpdu */
  1135. irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
  1136. fpdu_len);
  1137. buf->datalen -= fpdu_len;
  1138. buf->data += fpdu_len;
  1139. buf->seqnum = nextseqnum;
  1140. break;
  1141. }
  1142. /* copy partial fpdu */
  1143. irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
  1144. buf->datalen);
  1145. txoffset += buf->datalen;
  1146. fpdu_len -= buf->datalen;
  1147. irdma_puda_ret_bufpool(ieq, buf);
  1148. buf = irdma_puda_get_listbuf(pbufl);
  1149. if (!buf)
  1150. return;
  1151. bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
  1152. } while (1);
  1153. /* last buffer on the list */
  1154. if (buf->datalen)
  1155. list_add(&buf->list, rxlist);
  1156. else
  1157. irdma_puda_ret_bufpool(ieq, buf);
  1158. }
  1159. /**
  1160. * irdma_ieq_create_pbufl - create buffer list for single fpdu
  1161. * @pfpdu: pointer to fpdu
  1162. * @rxlist: resource list for receive ieq buffers
  1163. * @pbufl: temp. list for buffers for fpdu
  1164. * @buf: first receive buffer
  1165. * @fpdu_len: total length of fpdu
  1166. */
  1167. static int irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu,
  1168. struct list_head *rxlist,
  1169. struct list_head *pbufl,
  1170. struct irdma_puda_buf *buf, u16 fpdu_len)
  1171. {
  1172. int status = 0;
  1173. struct irdma_puda_buf *nextbuf;
  1174. u32 nextseqnum;
  1175. u16 plen = fpdu_len - buf->datalen;
  1176. bool done = false;
  1177. nextseqnum = buf->seqnum + buf->datalen;
  1178. do {
  1179. nextbuf = irdma_puda_get_listbuf(rxlist);
  1180. if (!nextbuf) {
  1181. status = -ENOBUFS;
  1182. break;
  1183. }
  1184. list_add_tail(&nextbuf->list, pbufl);
  1185. if (nextbuf->seqnum != nextseqnum) {
  1186. pfpdu->bad_seq_num++;
  1187. status = -ERANGE;
  1188. break;
  1189. }
  1190. if (nextbuf->datalen >= plen) {
  1191. done = true;
  1192. } else {
  1193. plen -= nextbuf->datalen;
  1194. nextseqnum = nextbuf->seqnum + nextbuf->datalen;
  1195. }
  1196. } while (!done);
  1197. return status;
  1198. }
  1199. /**
  1200. * irdma_ieq_handle_partial - process partial fpdu buffer
  1201. * @ieq: ieq resource
  1202. * @pfpdu: partial management per user qp
  1203. * @buf: receive buffer
  1204. * @fpdu_len: fpdu len in the buffer
  1205. */
  1206. static int irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq,
  1207. struct irdma_pfpdu *pfpdu,
  1208. struct irdma_puda_buf *buf, u16 fpdu_len)
  1209. {
  1210. int status = 0;
  1211. u8 *crcptr;
  1212. u32 mpacrc;
  1213. u32 seqnum = buf->seqnum;
  1214. struct list_head pbufl; /* partial buffer list */
  1215. struct irdma_puda_buf *txbuf = NULL;
  1216. struct list_head *rxlist = &pfpdu->rxlist;
  1217. ieq->partials_handled++;
  1218. INIT_LIST_HEAD(&pbufl);
  1219. list_add(&buf->list, &pbufl);
  1220. status = irdma_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
  1221. if (status)
  1222. goto error;
  1223. txbuf = irdma_puda_get_bufpool(ieq);
  1224. if (!txbuf) {
  1225. pfpdu->no_tx_bufs++;
  1226. status = -ENOBUFS;
  1227. goto error;
  1228. }
  1229. irdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
  1230. irdma_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
  1231. crcptr = txbuf->data + fpdu_len - 4;
  1232. mpacrc = *(u32 *)crcptr;
  1233. if (ieq->check_crc) {
  1234. status = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
  1235. (fpdu_len - 4), mpacrc);
  1236. if (status) {
  1237. ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error bad crc\n");
  1238. goto error;
  1239. }
  1240. }
  1241. print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
  1242. txbuf->mem.va, txbuf->totallen, false);
  1243. if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
  1244. txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
  1245. txbuf->do_lpb = true;
  1246. irdma_puda_send_buf(ieq, txbuf);
  1247. pfpdu->rcv_nxt = seqnum + fpdu_len;
  1248. return status;
  1249. error:
  1250. while (!list_empty(&pbufl)) {
  1251. buf = list_last_entry(&pbufl, struct irdma_puda_buf, list);
  1252. list_move(&buf->list, rxlist);
  1253. }
  1254. if (txbuf)
  1255. irdma_puda_ret_bufpool(ieq, txbuf);
  1256. return status;
  1257. }
  1258. /**
  1259. * irdma_ieq_process_buf - process buffer rcvd for ieq
  1260. * @ieq: ieq resource
  1261. * @pfpdu: partial management per user qp
  1262. * @buf: receive buffer
  1263. */
  1264. static int irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
  1265. struct irdma_pfpdu *pfpdu,
  1266. struct irdma_puda_buf *buf)
  1267. {
  1268. u16 fpdu_len = 0;
  1269. u16 datalen = buf->datalen;
  1270. u8 *datap = buf->data;
  1271. u8 *crcptr;
  1272. u16 ioffset = 0;
  1273. u32 mpacrc;
  1274. u32 seqnum = buf->seqnum;
  1275. u16 len = 0;
  1276. u16 full = 0;
  1277. bool partial = false;
  1278. struct irdma_puda_buf *txbuf;
  1279. struct list_head *rxlist = &pfpdu->rxlist;
  1280. int ret = 0;
  1281. ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
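/* consume complete fpdus from the payload; anything left over that is
 * shorter than a full fpdu is handled as a partial below
 */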
  1282. while (datalen) {
  1283. fpdu_len = irdma_ieq_get_fpdu_len(pfpdu, datap, buf->seqnum);
  1284. if (!fpdu_len) {
  1285. ibdev_dbg(to_ibdev(ieq->dev),
  1286. "IEQ: error bad fpdu len\n");
  1287. list_add(&buf->list, rxlist);
  1288. return -EINVAL;
  1289. }
  1290. if (datalen < fpdu_len) {
  1291. partial = true;
  1292. break;
  1293. }
  1294. crcptr = datap + fpdu_len - 4;
  1295. mpacrc = *(u32 *)crcptr;
  1296. if (ieq->check_crc)
  1297. ret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap,
  1298. fpdu_len - 4, mpacrc);
  1299. if (ret) {
  1300. list_add(&buf->list, rxlist);
  1301. ibdev_dbg(to_ibdev(ieq->dev),
  1302. "ERR: IRDMA_ERR_MPA_CRC\n");
  1303. return -EINVAL;
  1304. }
  1305. full++;
  1306. pfpdu->fpdu_processed++;
  1307. ieq->fpdu_processed++;
  1308. datap += fpdu_len;
  1309. len += fpdu_len;
  1310. datalen -= fpdu_len;
  1311. }
  1312. if (full) {
  1313. /* copy full pdu's in the txbuf and send them out */
  1314. txbuf = irdma_puda_get_bufpool(ieq);
  1315. if (!txbuf) {
  1316. pfpdu->no_tx_bufs++;
  1317. list_add(&buf->list, rxlist);
  1318. return -ENOBUFS;
  1319. }
  1320. /* modify txbuf's buffer header */
  1321. irdma_ieq_setup_tx_buf(buf, txbuf);
  1322. /* copy full fpdu's to new buffer */
  1323. if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
  1324. irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
  1325. txbuf->hdrlen, len);
  1326. txbuf->totallen = txbuf->hdrlen + len;
  1327. txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
  1328. } else {
  1329. irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
  1330. buf->hdrlen, len);
  1331. txbuf->totallen = buf->hdrlen + len;
  1332. }
  1333. irdma_ieq_update_tcpip_info(txbuf, len, buf->seqnum);
  1334. print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET,
  1335. 16, 8, txbuf->mem.va, txbuf->totallen,
  1336. false);
  1337. txbuf->do_lpb = true;
  1338. irdma_puda_send_buf(ieq, txbuf);
  1339. if (!datalen) {
  1340. pfpdu->rcv_nxt = buf->seqnum + len;
  1341. irdma_puda_ret_bufpool(ieq, buf);
  1342. return 0;
  1343. }
  1344. buf->data = datap;
  1345. buf->seqnum = seqnum + len;
  1346. buf->datalen = datalen;
  1347. pfpdu->rcv_nxt = buf->seqnum;
  1348. }
  1349. if (partial)
  1350. return irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
  1351. return 0;
  1352. }
  1353. /**
  1354. * irdma_ieq_process_fpdus - process fpdu buffers queued on the qp's rxlist
  1355. * @qp: qp for which partial fpdus are pending
  1356. * @ieq: ieq resource
  1357. */
  1358. void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
  1359. struct irdma_puda_rsrc *ieq)
  1360. {
  1361. struct irdma_pfpdu *pfpdu = &qp->pfpdu;
  1362. struct list_head *rxlist = &pfpdu->rxlist;
  1363. struct irdma_puda_buf *buf;
  1364. int status;
  1365. do {
  1366. if (list_empty(rxlist))
  1367. break;
  1368. buf = irdma_puda_get_listbuf(rxlist);
  1369. if (!buf) {
  1370. ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error no buf\n");
  1371. break;
  1372. }
  1373. if (buf->seqnum != pfpdu->rcv_nxt) {
  1374. /* This could be out of order or missing packet */
  1375. pfpdu->out_of_order++;
  1376. list_add(&buf->list, rxlist);
  1377. break;
  1378. }
  1379. /* keep processing buffers from the head of the list */
  1380. status = irdma_ieq_process_buf(ieq, pfpdu, buf);
  1381. if (status == -EINVAL) {
  1382. pfpdu->mpa_crc_err = true;
  1383. while (!list_empty(rxlist)) {
  1384. buf = irdma_puda_get_listbuf(rxlist);
  1385. irdma_puda_ret_bufpool(ieq, buf);
  1386. pfpdu->crc_err++;
  1387. ieq->crc_err++;
  1388. }
  1389. /* create CQP for AE */
  1390. irdma_ieq_mpa_crc_ae(ieq->dev, qp);
  1391. }
  1392. } while (!status);
  1393. }
  1394. /**
  1395. * irdma_ieq_create_ah - create an address handle for IEQ
  1396. * @qp: qp pointer
  1397. * @buf: buf received on IEQ used to create AH
  1398. */
  1399. static int irdma_ieq_create_ah(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf)
  1400. {
  1401. struct irdma_ah_info ah_info = {};
  1402. qp->pfpdu.ah_buf = buf;
  1403. irdma_puda_ieq_get_ah_info(qp, &ah_info);
  1404. return irdma_puda_create_ah(qp->vsi->dev, &ah_info, false,
  1405. IRDMA_PUDA_RSRC_TYPE_IEQ, qp,
  1406. &qp->pfpdu.ah);
  1407. }
  1408. /**
  1409. * irdma_ieq_handle_exception - handle qp's exception
  1410. * @ieq: ieq resource
  1411. * @qp: qp receiving exception
  1412. * @buf: receive buffer
  1413. */
  1414. static void irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,
  1415. struct irdma_sc_qp *qp,
  1416. struct irdma_puda_buf *buf)
  1417. {
  1418. struct irdma_pfpdu *pfpdu = &qp->pfpdu;
  1419. u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
  1420. u32 rcv_wnd = hw_host_ctx[23];
  1421. /* first partial seq # in q2 */
  1422. u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
  1423. struct list_head *rxlist = &pfpdu->rxlist;
  1424. unsigned long flags = 0;
  1425. u8 hw_rev = qp->dev->hw_attrs.uk_attrs.hw_rev;
  1426. print_hex_dump_debug("IEQ: IEQ RX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
  1427. buf->mem.va, buf->totallen, false);
  1428. spin_lock_irqsave(&pfpdu->lock, flags);
  1429. pfpdu->total_ieq_bufs++;
  1430. if (pfpdu->mpa_crc_err) {
  1431. pfpdu->crc_err++;
  1432. goto error;
  1433. }
  1434. if (pfpdu->mode && fps != pfpdu->fps) {
  1435. /* clean up qp as it is new partial sequence */
  1436. irdma_ieq_cleanup_qp(ieq, qp);
  1437. ibdev_dbg(to_ibdev(ieq->dev), "IEQ: restarting new partial\n");
  1438. pfpdu->mode = false;
  1439. }
  1440. if (!pfpdu->mode) {
  1441. print_hex_dump_debug("IEQ: Q2 BUFFER", DUMP_PREFIX_OFFSET, 16,
  1442. 8, (u64 *)qp->q2_buf, 128, false);
  1443. /* First_Partial_Sequence_Number check */
  1444. pfpdu->rcv_nxt = fps;
  1445. pfpdu->fps = fps;
  1446. pfpdu->mode = true;
  1447. pfpdu->max_fpdu_data = (buf->ipv4) ?
  1448. (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) :
  1449. (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6);
  1450. pfpdu->pmode_count++;
  1451. ieq->pmode_count++;
  1452. INIT_LIST_HEAD(rxlist);
  1453. irdma_ieq_check_first_buf(buf, fps);
  1454. }
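/* drop buffers whose sequence number is outside the receive window
 * relative to rcv_nxt
 */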
  1455. if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
  1456. pfpdu->bad_seq_num++;
  1457. ieq->bad_seq_num++;
  1458. goto error;
  1459. }
  1460. if (!list_empty(rxlist)) {
  1461. if (buf->seqnum != pfpdu->nextseqnum) {
  1462. irdma_send_ieq_ack(qp);
  1463. /* throw away out-of-order and duplicate packets */
  1464. goto error;
  1465. }
  1466. }
  1467. /* Insert buf before head */
  1468. list_add_tail(&buf->list, rxlist);
  1469. pfpdu->nextseqnum = buf->seqnum + buf->datalen;
  1470. pfpdu->lastrcv_buf = buf;
  1471. if (hw_rev >= IRDMA_GEN_2 && !pfpdu->ah) {
  1472. irdma_ieq_create_ah(qp, buf);
  1473. if (!pfpdu->ah)
  1474. goto error;
  1475. goto exit;
  1476. }
  1477. if (hw_rev == IRDMA_GEN_1)
  1478. irdma_ieq_process_fpdus(qp, ieq);
  1479. else if (pfpdu->ah && pfpdu->ah->ah_info.ah_valid)
  1480. irdma_ieq_process_fpdus(qp, ieq);
  1481. exit:
  1482. spin_unlock_irqrestore(&pfpdu->lock, flags);
  1483. return;
  1484. error:
  1485. irdma_puda_ret_bufpool(ieq, buf);
  1486. spin_unlock_irqrestore(&pfpdu->lock, flags);
  1487. }
  1488. /**
  1489. * irdma_ieq_receive - handle received exception buffer
  1490. * @vsi: VSI of device
  1491. * @buf: exception buffer received
  1492. */
  1493. static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
  1494. struct irdma_puda_buf *buf)
  1495. {
  1496. struct irdma_puda_rsrc *ieq = vsi->ieq;
  1497. struct irdma_sc_qp *qp = NULL;
  1498. u32 wqe_idx = ieq->compl_rxwqe_idx;
  1499. qp = irdma_ieq_get_qp(vsi->dev, buf);
  1500. if (!qp) {
  1501. ieq->stats_bad_qp_id++;
  1502. irdma_puda_ret_bufpool(ieq, buf);
  1503. } else {
  1504. irdma_ieq_handle_exception(ieq, qp, buf);
  1505. }
  1506. /*
  1507. * ieq->rx_wqe_idx is used by irdma_puda_replenish_rq()
  1508. * on which wqe_idx to start replenish rq
  1509. */
  1510. if (!ieq->rxq_invalid_cnt)
  1511. ieq->rx_wqe_idx = wqe_idx;
  1512. ieq->rxq_invalid_cnt++;
  1513. }
  1514. /**
  1515. * irdma_ieq_tx_compl - put back after sending completed exception buffer
  1516. * @vsi: sc VSI struct
  1517. * @sqwrid: pointer to puda buffer
  1518. */
  1519. static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid)
  1520. {
  1521. struct irdma_puda_rsrc *ieq = vsi->ieq;
  1522. struct irdma_puda_buf *buf = sqwrid;
  1523. irdma_puda_ret_bufpool(ieq, buf);
  1524. }
  1525. /**
  1526. * irdma_ieq_cleanup_qp - qp is being destroyed
  1527. * @ieq: ieq resource
  1528. * @qp: qp being destroyed; return its pending fpdu buffers
  1529. */
  1530. void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp)
  1531. {
  1532. struct irdma_puda_buf *buf;
  1533. struct irdma_pfpdu *pfpdu = &qp->pfpdu;
  1534. struct list_head *rxlist = &pfpdu->rxlist;
  1535. if (qp->pfpdu.ah) {
  1536. irdma_puda_free_ah(ieq->dev, qp->pfpdu.ah);
  1537. qp->pfpdu.ah = NULL;
  1538. qp->pfpdu.ah_buf = NULL;
  1539. }
  1540. if (!pfpdu->mode)
  1541. return;
  1542. while (!list_empty(rxlist)) {
  1543. buf = irdma_puda_get_listbuf(rxlist);
  1544. irdma_puda_ret_bufpool(ieq, buf);
  1545. }
  1546. }