cxgbit_target.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016 Chelsio Communications, Inc.
  4. */
  5. #include <linux/workqueue.h>
  6. #include <linux/kthread.h>
  7. #include <linux/sched/signal.h>
  8. #include <asm/unaligned.h>
  9. #include <net/tcp.h>
  10. #include <target/target_core_base.h>
  11. #include <target/target_core_fabric.h>
  12. #include "cxgbit.h"
  13. struct sge_opaque_hdr {
  14. void *dev;
  15. dma_addr_t addr[MAX_SKB_FRAGS + 1];
  16. };
  17. static const u8 cxgbit_digest_len[] = {0, 4, 4, 8};
  18. #define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \
  19. sizeof(struct fw_ofld_tx_data_wr))
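/*
 * Allocate a TX skb with headroom for the firmware WR and the iSCSI BHS.
 * A non-zero @len means the payload will be carried in page frags and the
 * data digest submode is inherited from the connection; for ISO requests
 * extra headroom is reserved for the cpl_tx_data_iso header. The header
 * digest submode and the digest trailer length are always set here.
 */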
  20. static struct sk_buff *
  21. __cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
  22. {
  23. struct sk_buff *skb = NULL;
  24. u8 submode = 0;
  25. int errcode;
  26. static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN;
  27. if (len) {
  28. skb = alloc_skb_with_frags(hdr_len, len,
  29. 0, &errcode,
  30. GFP_KERNEL);
  31. if (!skb)
  32. return NULL;
  33. skb_reserve(skb, TX_HDR_LEN);
  34. skb_reset_transport_header(skb);
  35. __skb_put(skb, ISCSI_HDR_LEN);
  36. skb->data_len = len;
  37. skb->len += len;
  38. submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);
  39. } else {
  40. u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0;
  41. skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL);
  42. if (!skb)
  43. return NULL;
  44. skb_reserve(skb, TX_HDR_LEN + iso_len);
  45. skb_reset_transport_header(skb);
  46. __skb_put(skb, ISCSI_HDR_LEN);
  47. }
  48. submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);
  49. cxgbit_skcb_submode(skb) = submode;
  50. cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode];
  51. cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR;
  52. return skb;
  53. }
  54. static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len)
  55. {
  56. return __cxgbit_alloc_skb(csk, len, false);
  57. }
  58. /*
  59. * cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data
  60. * @skb: the packet
  61. *
  62. * Returns true if a packet can be sent as an offload WR with immediate
  63. * data. We currently use the same limit as for Ethernet packets.
  64. */
  65. static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
  66. {
  67. int length = skb->len;
  68. if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
  69. length += sizeof(struct fw_ofld_tx_data_wr);
  70. if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
  71. length += sizeof(struct cpl_tx_data_iso);
  72. return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
  73. }
  74. /*
  75. * cxgbit_sgl_len - calculates the size of an SGL of the given capacity
  76. * @n: the number of SGL entries
  77. * Calculates the number of flits needed for a scatter/gather list that
  78. * can hold the given number of entries.
  79. */
  80. static inline unsigned int cxgbit_sgl_len(unsigned int n)
  81. {
  82. n--;
  83. return (3 * n) / 2 + (n & 1) + 2;
  84. }
  85. /*
  86. * cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet
  87. * @skb: the packet
  88. *
  89. * Returns the number of flits needed for the given offload packet.
  90. * These packets are already fully constructed and no additional headers
  91. * will be added.
  92. */
  93. static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb)
  94. {
  95. unsigned int flits, cnt;
  96. if (cxgbit_is_ofld_imm(skb))
  97. return DIV_ROUND_UP(skb->len, 8);
  98. flits = skb_transport_offset(skb) / 8;
  99. cnt = skb_shinfo(skb)->nr_frags;
  100. if (skb_tail_pointer(skb) != skb_transport_header(skb))
  101. cnt++;
  102. return flits + cxgbit_sgl_len(cnt);
  103. }
  104. #define CXGBIT_ISO_FSLICE 0x1
  105. #define CXGBIT_ISO_LSLICE 0x2
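/*
 * Fill in a cpl_tx_data_iso header in the skb headroom: first/last slice
 * flags, header/data digest bits taken from the tx submode, the per-PDU
 * size (mpdu) and burst length in 4-byte units, and the total ISO length.
 * The header is pushed and immediately pulled back so skb->data keeps
 * pointing at the iSCSI header.
 */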
  106. static void
  107. cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info)
  108. {
  109. struct cpl_tx_data_iso *cpl;
  110. unsigned int submode = cxgbit_skcb_submode(skb);
  111. unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE);
  112. unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE);
  113. cpl = __skb_push(skb, sizeof(*cpl));
  114. cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
  115. CPL_TX_DATA_ISO_FIRST_V(fslice) |
  116. CPL_TX_DATA_ISO_LAST_V(lslice) |
  117. CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
  118. CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
  119. CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
  120. CPL_TX_DATA_ISO_IMMEDIATE_V(0) |
  121. CPL_TX_DATA_ISO_SCSI_V(2));
  122. cpl->ahs_len = 0;
  123. cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4));
  124. cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4));
  125. cpl->len = htonl(iso_info->len);
  126. cpl->reserved2_seglen_offset = htonl(0);
  127. cpl->datasn_offset = htonl(0);
  128. cpl->buffer_offset = htonl(0);
  129. cpl->reserved3 = 0;
  130. __skb_pull(skb, sizeof(*cpl));
  131. }
  132. static void
  133. cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
  134. u32 len, u32 credits, u32 compl)
  135. {
  136. struct fw_ofld_tx_data_wr *req;
  137. const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
  138. u32 submode = cxgbit_skcb_submode(skb);
  139. u32 wr_ulp_mode = 0;
  140. u32 hdr_size = sizeof(*req);
  141. u32 opcode = FW_OFLD_TX_DATA_WR;
  142. u32 immlen = 0;
  143. u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
  144. T6_TX_FORCE_F;
  145. if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
  146. opcode = FW_ISCSI_TX_DATA_WR;
  147. immlen += sizeof(struct cpl_tx_data_iso);
  148. hdr_size += sizeof(struct cpl_tx_data_iso);
  149. submode |= 8;
  150. }
  151. if (cxgbit_is_ofld_imm(skb))
  152. immlen += dlen;
  153. req = __skb_push(skb, hdr_size);
  154. req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
  155. FW_WR_COMPL_V(compl) |
  156. FW_WR_IMMDLEN_V(immlen));
  157. req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
  158. FW_WR_LEN16_V(credits));
  159. req->plen = htonl(len);
  160. wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) |
  161. FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
  162. req->tunnel_to_proxy = htonl(wr_ulp_mode | force |
  163. FW_OFLD_TX_DATA_WR_SHOVE_F);
  164. }
  165. static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
  166. {
  167. kfree_skb(skb);
  168. }
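/*
 * Push queued TX skbs to the hardware while WR credits are available:
 * work out the credits each skb needs (immediate vs. SGL transfer, plus
 * the tx_data WR and optional ISO CPL), send the FLOWC WR on the first
 * transmit, record the consumed credits in skb->csum for later completion
 * accounting, build the FW_OFLD_TX_DATA_WR header where needed and hand
 * the skb to the L2T layer.
 */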
  169. void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
  170. {
  171. struct sk_buff *skb;
  172. while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
  173. u32 dlen = skb->len;
  174. u32 len = skb->len;
  175. u32 credits_needed;
  176. u32 compl = 0;
  177. u32 flowclen16 = 0;
  178. u32 iso_cpl_len = 0;
  179. if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)
  180. iso_cpl_len = sizeof(struct cpl_tx_data_iso);
  181. if (cxgbit_is_ofld_imm(skb))
  182. credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
  183. else
  184. credits_needed = DIV_ROUND_UP((8 *
  185. cxgbit_calc_tx_flits_ofld(skb)) +
  186. iso_cpl_len, 16);
  187. if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
  188. credits_needed += DIV_ROUND_UP(
  189. sizeof(struct fw_ofld_tx_data_wr), 16);
  190. /*
  191. * Assumes the initial credits is large enough to support
  192. * fw_flowc_wr plus largest possible first payload
  193. */
  194. if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) {
  195. flowclen16 = cxgbit_send_tx_flowc_wr(csk);
  196. csk->wr_cred -= flowclen16;
  197. csk->wr_una_cred += flowclen16;
  198. }
  199. if (csk->wr_cred < credits_needed) {
  200. pr_debug("csk 0x%p, skb %u/%u, wr %d < %u.\n",
  201. csk, skb->len, skb->data_len,
  202. credits_needed, csk->wr_cred);
  203. break;
  204. }
  205. __skb_unlink(skb, &csk->txq);
  206. set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
  207. skb->csum = (__force __wsum)(credits_needed + flowclen16);
  208. csk->wr_cred -= credits_needed;
  209. csk->wr_una_cred += credits_needed;
  210. pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
  211. csk, skb->len, skb->data_len, credits_needed,
  212. csk->wr_cred, csk->wr_una_cred);
  213. if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) {
  214. len += cxgbit_skcb_tx_extralen(skb);
  215. if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
  216. (!before(csk->write_seq,
  217. csk->snd_una + csk->snd_win))) {
  218. compl = 1;
  219. csk->wr_una_cred = 0;
  220. }
  221. cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed,
  222. compl);
  223. csk->snd_nxt += len;
  224. } else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) ||
  225. (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
  226. struct cpl_close_con_req *req =
  227. (struct cpl_close_con_req *)skb->data;
  228. req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
  229. csk->wr_una_cred = 0;
  230. }
  231. cxgbit_sock_enqueue_wr(csk, skb);
  232. t4_set_arp_err_handler(skb, csk,
  233. cxgbit_arp_failure_skb_discard);
  234. pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n",
  235. csk, csk->tid, skb, len);
  236. cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
  237. }
  238. }
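/*
 * Release ownership of the socket: replay any skbs queued on
 * csk->backlogq while this thread held lock_owner, invoking each skb's
 * saved backlog handler, then clear lock_owner.
 */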
  239. static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
  240. {
  241. struct sk_buff_head backlogq;
  242. struct sk_buff *skb;
  243. void (*fn)(struct cxgbit_sock *, struct sk_buff *);
  244. skb_queue_head_init(&backlogq);
  245. spin_lock_bh(&csk->lock);
  246. while (skb_queue_len(&csk->backlogq)) {
  247. skb_queue_splice_init(&csk->backlogq, &backlogq);
  248. spin_unlock_bh(&csk->lock);
  249. while ((skb = __skb_dequeue(&backlogq))) {
  250. fn = cxgbit_skcb_rx_backlog_fn(skb);
  251. fn(csk, skb);
  252. }
  253. spin_lock_bh(&csk->lock);
  254. }
  255. csk->lock_owner = false;
  256. spin_unlock_bh(&csk->lock);
  257. }
  258. static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
  259. {
  260. int ret = 0;
  261. spin_lock_bh(&csk->lock);
  262. csk->lock_owner = true;
  263. spin_unlock_bh(&csk->lock);
  264. if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
  265. signal_pending(current))) {
  266. __kfree_skb(skb);
  267. __skb_queue_purge(&csk->ppodq);
  268. ret = -1;
  269. goto unlock;
  270. }
  271. csk->write_seq += skb->len +
  272. cxgbit_skcb_tx_extralen(skb);
  273. skb_queue_splice_tail_init(&csk->ppodq, &csk->txq);
  274. __skb_queue_tail(&csk->txq, skb);
  275. cxgbit_push_tx_frames(csk);
  276. unlock:
  277. cxgbit_unlock_sock(csk);
  278. return ret;
  279. }
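/*
 * Map up to MAX_SKB_FRAGS pages of the command's t_data_sg list into the
 * skb as page frags, starting at @data_offset and taking a reference on
 * each page. When the payload needs padding to a 4-byte boundary, one
 * frag slot is kept back for a freshly allocated zeroed padding page.
 */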
  280. static int
  281. cxgbit_map_skb(struct iscsit_cmd *cmd, struct sk_buff *skb, u32 data_offset,
  282. u32 data_length)
  283. {
  284. u32 i = 0, nr_frags = MAX_SKB_FRAGS;
  285. u32 padding = ((-data_length) & 3);
  286. struct scatterlist *sg;
  287. struct page *page;
  288. unsigned int page_off;
  289. if (padding)
  290. nr_frags--;
  291. /*
  292. * We know each entry in t_data_sg contains a page.
  293. */
  294. sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
  295. page_off = (data_offset % PAGE_SIZE);
  296. while (data_length && (i < nr_frags)) {
  297. u32 cur_len = min_t(u32, data_length, sg->length - page_off);
  298. page = sg_page(sg);
  299. get_page(page);
  300. skb_fill_page_desc(skb, i, page, sg->offset + page_off,
  301. cur_len);
  302. skb->data_len += cur_len;
  303. skb->len += cur_len;
  304. skb->truesize += cur_len;
  305. data_length -= cur_len;
  306. page_off = 0;
  307. sg = sg_next(sg);
  308. i++;
  309. }
  310. if (data_length)
  311. return -1;
  312. if (padding) {
  313. page = alloc_page(GFP_KERNEL | __GFP_ZERO);
  314. if (!page)
  315. return -1;
  316. skb_fill_page_desc(skb, i, page, 0, padding);
  317. skb->data_len += padding;
  318. skb->len += padding;
  319. skb->truesize += padding;
  320. }
  321. return 0;
  322. }
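/*
 * Send Data-In using ISO: each skb carries up to max_iso_npdu PDUs worth
 * of payload plus a cpl_tx_data_iso header, and the adapter segments it
 * into individual Data-In PDUs. The final sequence sets the last-slice
 * flag and, when no sense data is pending, DATA_STATUS/StatSN.
 */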
  323. static int
  324. cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsit_cmd *cmd,
  325. struct iscsi_datain_req *dr)
  326. {
  327. struct iscsit_conn *conn = csk->conn;
  328. struct sk_buff *skb;
  329. struct iscsi_datain datain;
  330. struct cxgbit_iso_info iso_info;
  331. u32 data_length = cmd->se_cmd.data_length;
  332. u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
  333. u32 num_pdu, plen, tx_data = 0;
  334. bool task_sense = !!(cmd->se_cmd.se_cmd_flags &
  335. SCF_TRANSPORT_TASK_SENSE);
  336. bool set_statsn = false;
  337. int ret = -1;
  338. while (data_length) {
  339. num_pdu = (data_length + mrdsl - 1) / mrdsl;
  340. if (num_pdu > csk->max_iso_npdu)
  341. num_pdu = csk->max_iso_npdu;
  342. plen = num_pdu * mrdsl;
  343. if (plen > data_length)
  344. plen = data_length;
  345. skb = __cxgbit_alloc_skb(csk, 0, true);
  346. if (unlikely(!skb))
  347. return -ENOMEM;
  348. memset(skb->data, 0, ISCSI_HDR_LEN);
  349. cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO;
  350. cxgbit_skcb_submode(skb) |= (csk->submode &
  351. CXGBIT_SUBMODE_DCRC);
  352. cxgbit_skcb_tx_extralen(skb) = (num_pdu *
  353. cxgbit_digest_len[cxgbit_skcb_submode(skb)]) +
  354. ((num_pdu - 1) * ISCSI_HDR_LEN);
  355. memset(&datain, 0, sizeof(struct iscsi_datain));
  356. memset(&iso_info, 0, sizeof(iso_info));
  357. if (!tx_data)
  358. iso_info.flags |= CXGBIT_ISO_FSLICE;
  359. if (!(data_length - plen)) {
  360. iso_info.flags |= CXGBIT_ISO_LSLICE;
  361. if (!task_sense) {
  362. datain.flags = ISCSI_FLAG_DATA_STATUS;
  363. iscsit_increment_maxcmdsn(cmd, conn->sess);
  364. cmd->stat_sn = conn->stat_sn++;
  365. set_statsn = true;
  366. }
  367. }
  368. iso_info.burst_len = num_pdu * mrdsl;
  369. iso_info.mpdu = mrdsl;
  370. iso_info.len = ISCSI_HDR_LEN + plen;
  371. cxgbit_cpl_tx_data_iso(skb, &iso_info);
  372. datain.offset = tx_data;
  373. datain.data_sn = cmd->data_sn - 1;
  374. iscsit_build_datain_pdu(cmd, conn, &datain,
  375. (struct iscsi_data_rsp *)skb->data,
  376. set_statsn);
  377. ret = cxgbit_map_skb(cmd, skb, tx_data, plen);
  378. if (unlikely(ret)) {
  379. __kfree_skb(skb);
  380. goto out;
  381. }
  382. ret = cxgbit_queue_skb(csk, skb);
  383. if (unlikely(ret))
  384. goto out;
  385. tx_data += plen;
  386. data_length -= plen;
  387. cmd->read_data_done += plen;
  388. cmd->data_sn += num_pdu;
  389. }
  390. dr->dr_complete = DATAIN_COMPLETE_NORMAL;
  391. return 0;
  392. out:
  393. return ret;
  394. }
  395. static int
  396. cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsit_cmd *cmd,
  397. const struct iscsi_datain *datain)
  398. {
  399. struct sk_buff *skb;
  400. int ret = 0;
  401. skb = cxgbit_alloc_skb(csk, 0);
  402. if (unlikely(!skb))
  403. return -ENOMEM;
  404. memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
  405. if (datain->length) {
  406. cxgbit_skcb_submode(skb) |= (csk->submode &
  407. CXGBIT_SUBMODE_DCRC);
  408. cxgbit_skcb_tx_extralen(skb) =
  409. cxgbit_digest_len[cxgbit_skcb_submode(skb)];
  410. }
  411. ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length);
  412. if (ret < 0) {
  413. __kfree_skb(skb);
  414. return ret;
  415. }
  416. return cxgbit_queue_skb(csk, skb);
  417. }
  418. static int
  419. cxgbit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
  420. struct iscsi_datain_req *dr,
  421. const struct iscsi_datain *datain)
  422. {
  423. struct cxgbit_sock *csk = conn->context;
  424. u32 data_length = cmd->se_cmd.data_length;
  425. u32 padding = ((-data_length) & 3);
  426. u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
  427. if ((data_length > mrdsl) && (!dr->recovery) &&
  428. (!padding) && (!datain->offset) && csk->max_iso_npdu) {
  429. atomic_long_add(data_length - datain->length,
  430. &conn->sess->tx_data_octets);
  431. return cxgbit_tx_datain_iso(csk, cmd, dr);
  432. }
  433. return cxgbit_tx_datain(csk, cmd, datain);
  434. }
  435. static int
  436. cxgbit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
  437. const void *data_buf, u32 data_buf_len)
  438. {
  439. struct cxgbit_sock *csk = conn->context;
  440. struct sk_buff *skb;
  441. u32 padding = ((-data_buf_len) & 3);
  442. skb = cxgbit_alloc_skb(csk, data_buf_len + padding);
  443. if (unlikely(!skb))
  444. return -ENOMEM;
  445. memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
  446. if (data_buf_len) {
  447. u32 pad_bytes = 0;
  448. skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len);
  449. if (padding)
  450. skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len,
  451. &pad_bytes, padding);
  452. }
  453. cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[
  454. cxgbit_skcb_submode(skb)];
  455. return cxgbit_queue_skb(csk, skb);
  456. }
  457. int
  458. cxgbit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
  459. struct iscsi_datain_req *dr, const void *buf, u32 buf_len)
  460. {
  461. if (dr)
  462. return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf);
  463. else
  464. return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
  465. }
  466. int cxgbit_validate_params(struct iscsit_conn *conn)
  467. {
  468. struct cxgbit_sock *csk = conn->context;
  469. struct cxgbit_device *cdev = csk->com.cdev;
  470. struct iscsi_param *param;
  471. u32 max_xmitdsl;
  472. param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
  473. conn->param_list);
  474. if (!param)
  475. return -1;
  476. if (kstrtou32(param->value, 0, &max_xmitdsl) < 0)
  477. return -1;
  478. if (max_xmitdsl > cdev->mdsl) {
  479. if (iscsi_change_param_sprintf(
  480. conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl))
  481. return -1;
  482. }
  483. return 0;
  484. }
  485. static int cxgbit_set_digest(struct cxgbit_sock *csk)
  486. {
  487. struct iscsit_conn *conn = csk->conn;
  488. struct iscsi_param *param;
  489. param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list);
  490. if (!param) {
  491. pr_err("param not found key %s\n", HEADERDIGEST);
  492. return -1;
  493. }
  494. if (!strcmp(param->value, CRC32C))
  495. csk->submode |= CXGBIT_SUBMODE_HCRC;
  496. param = iscsi_find_param_from_key(DATADIGEST, conn->param_list);
  497. if (!param) {
  498. csk->submode = 0;
  499. pr_err("param not found key %s\n", DATADIGEST);
  500. return -1;
  501. }
  502. if (!strcmp(param->value, CRC32C))
  503. csk->submode |= CXGBIT_SUBMODE_DCRC;
  504. if (cxgbit_setup_conn_digest(csk)) {
  505. csk->submode = 0;
  506. return -1;
  507. }
  508. return 0;
  509. }
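/*
 * Compute the maximum number of Data-In PDUs per ISO request from the
 * negotiated MaxBurstLength and MaxRecvDataSegmentLength, the digest
 * overhead and the emss-aligned ISO payload limit. A value <= 1 disables
 * ISO (max_iso_npdu = 0).
 */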
  510. static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
  511. {
  512. struct iscsit_conn *conn = csk->conn;
  513. struct iscsi_conn_ops *conn_ops = conn->conn_ops;
  514. struct iscsi_param *param;
  515. u32 mrdsl, mbl;
  516. u32 max_npdu, max_iso_npdu;
  517. u32 max_iso_payload;
  518. if (conn->login->leading_connection) {
  519. param = iscsi_find_param_from_key(MAXBURSTLENGTH,
  520. conn->param_list);
  521. if (!param) {
  522. pr_err("param not found key %s\n", MAXBURSTLENGTH);
  523. return -1;
  524. }
  525. if (kstrtou32(param->value, 0, &mbl) < 0)
  526. return -1;
  527. } else {
  528. mbl = conn->sess->sess_ops->MaxBurstLength;
  529. }
  530. mrdsl = conn_ops->MaxRecvDataSegmentLength;
  531. max_npdu = mbl / mrdsl;
  532. max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);
  533. max_iso_npdu = max_iso_payload /
  534. (ISCSI_HDR_LEN + mrdsl +
  535. cxgbit_digest_len[csk->submode]);
  536. csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
  537. if (csk->max_iso_npdu <= 1)
  538. csk->max_iso_npdu = 0;
  539. return 0;
  540. }
  541. /*
  542. * cxgbit_seq_pdu_inorder()
  543. * @csk: pointer to cxgbit socket structure
  544. *
  545. * This function checks whether data sequence and data
  546. * pdu are in order.
  547. *
  548. * Return: returns -1 on error, 0 if data sequence and
  549. * data pdu are in order, 1 if data sequence or data pdu
  550. * is not in order.
  551. */
  552. static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk)
  553. {
  554. struct iscsit_conn *conn = csk->conn;
  555. struct iscsi_param *param;
  556. if (conn->login->leading_connection) {
  557. param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
  558. conn->param_list);
  559. if (!param) {
  560. pr_err("param not found key %s\n", DATASEQUENCEINORDER);
  561. return -1;
  562. }
  563. if (strcmp(param->value, YES))
  564. return 1;
  565. param = iscsi_find_param_from_key(DATAPDUINORDER,
  566. conn->param_list);
  567. if (!param) {
  568. pr_err("param not found key %s\n", DATAPDUINORDER);
  569. return -1;
  570. }
  571. if (strcmp(param->value, YES))
  572. return 1;
  573. } else {
  574. if (!conn->sess->sess_ops->DataSequenceInOrder)
  575. return 1;
  576. if (!conn->sess->sess_ops->DataPDUInOrder)
  577. return 1;
  578. }
  579. return 0;
  580. }
  581. static int cxgbit_set_params(struct iscsit_conn *conn)
  582. {
  583. struct cxgbit_sock *csk = conn->context;
  584. struct cxgbit_device *cdev = csk->com.cdev;
  585. struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm;
  586. struct iscsi_conn_ops *conn_ops = conn->conn_ops;
  587. struct iscsi_param *param;
  588. u8 erl;
  589. if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
  590. conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
  591. if (cxgbit_set_digest(csk))
  592. return -1;
  593. if (conn->login->leading_connection) {
  594. param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
  595. conn->param_list);
  596. if (!param) {
  597. pr_err("param not found key %s\n", ERRORRECOVERYLEVEL);
  598. return -1;
  599. }
  600. if (kstrtou8(param->value, 0, &erl) < 0)
  601. return -1;
  602. } else {
  603. erl = conn->sess->sess_ops->ErrorRecoveryLevel;
  604. }
  605. if (!erl) {
  606. int ret;
  607. ret = cxgbit_seq_pdu_inorder(csk);
  608. if (ret < 0) {
  609. return -1;
  610. } else if (ret > 0) {
  611. if (is_t5(cdev->lldi.adapter_type))
  612. goto enable_ddp;
  613. else
  614. return 0;
  615. }
  616. if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
  617. if (cxgbit_set_iso_npdu(csk))
  618. return -1;
  619. }
  620. enable_ddp:
  621. if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) {
  622. if (cxgbit_setup_conn_pgidx(csk,
  623. ppm->tformat.pgsz_idx_dflt))
  624. return -1;
  625. set_bit(CSK_DDP_ENABLE, &csk->com.flags);
  626. }
  627. }
  628. return 0;
  629. }
  630. int
  631. cxgbit_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
  632. u32 length)
  633. {
  634. struct cxgbit_sock *csk = conn->context;
  635. struct sk_buff *skb;
  636. u32 padding_buf = 0;
  637. u8 padding = ((-length) & 3);
  638. skb = cxgbit_alloc_skb(csk, length + padding);
  639. if (!skb)
  640. return -ENOMEM;
  641. skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN);
  642. skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length);
  643. if (padding)
  644. skb_store_bits(skb, ISCSI_HDR_LEN + length,
  645. &padding_buf, padding);
  646. if (login->login_complete) {
  647. if (cxgbit_set_params(conn)) {
  648. kfree_skb(skb);
  649. return -1;
  650. }
  651. set_bit(CSK_LOGIN_DONE, &csk->com.flags);
  652. }
  653. if (cxgbit_queue_skb(csk, skb))
  654. return -1;
  655. if ((!login->login_complete) && (!login->login_failed))
  656. schedule_delayed_work(&conn->login_work, 0);
  657. return 0;
  658. }
  659. static void
  660. cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
  661. unsigned int nents, u32 skip)
  662. {
  663. struct skb_seq_state st;
  664. const u8 *buf;
  665. unsigned int consumed = 0, buf_len;
  666. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb);
  667. skb_prepare_seq_read(skb, pdu_cb->doffset,
  668. pdu_cb->doffset + pdu_cb->dlen,
  669. &st);
  670. while (true) {
  671. buf_len = skb_seq_read(consumed, &buf, &st);
  672. if (!buf_len) {
  673. skb_abort_seq_read(&st);
  674. break;
  675. }
  676. consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
  677. buf_len, skip + consumed);
  678. }
  679. }
  680. static struct iscsit_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
  681. {
  682. struct iscsit_conn *conn = csk->conn;
  683. struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev);
  684. struct cxgbit_cmd *ccmd;
  685. struct iscsit_cmd *cmd;
  686. cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
  687. if (!cmd) {
  688. pr_err("Unable to allocate iscsit_cmd + cxgbit_cmd\n");
  689. return NULL;
  690. }
  691. ccmd = iscsit_priv_cmd(cmd);
  692. ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask;
  693. ccmd->setup_ddp = true;
  694. return cmd;
  695. }
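/*
 * Absorb the immediate data that arrived with a SCSI command PDU. A data
 * digest error rejects the command and reports whether ERL allows
 * recovery. Otherwise, if SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is set, the
 * command's scatterlist is pointed straight at the receive frag (zero
 * copy); if not, the payload is copied from the skb into t_data_sg.
 */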
  696. static int
  697. cxgbit_handle_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
  698. u32 length)
  699. {
  700. struct iscsit_conn *conn = cmd->conn;
  701. struct cxgbit_sock *csk = conn->context;
  702. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
  703. if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
  704. pr_err("ImmediateData CRC32C DataDigest error\n");
  705. if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
  706. pr_err("Unable to recover from"
  707. " Immediate Data digest failure while"
  708. " in ERL=0.\n");
  709. iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
  710. (unsigned char *)hdr);
  711. return IMMEDIATE_DATA_CANNOT_RECOVER;
  712. }
  713. iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
  714. (unsigned char *)hdr);
  715. return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
  716. }
  717. if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
  718. struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
  719. struct skb_shared_info *ssi = skb_shinfo(csk->skb);
  720. skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];
  721. sg_init_table(&ccmd->sg, 1);
  722. sg_set_page(&ccmd->sg, skb_frag_page(dfrag),
  723. skb_frag_size(dfrag), skb_frag_off(dfrag));
  724. get_page(skb_frag_page(dfrag));
  725. cmd->se_cmd.t_data_sg = &ccmd->sg;
  726. cmd->se_cmd.t_data_nents = 1;
  727. ccmd->release = true;
  728. } else {
  729. struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
  730. u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
  731. cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
  732. }
  733. cmd->write_data_done += pdu_cb->dlen;
  734. if (cmd->write_data_done == cmd->se_cmd.data_length) {
  735. spin_lock_bh(&cmd->istate_lock);
  736. cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
  737. cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
  738. spin_unlock_bh(&cmd->istate_lock);
  739. }
  740. return IMMEDIATE_DATA_NORMAL_OPERATION;
  741. }
  742. static int
  743. cxgbit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
  744. bool dump_payload)
  745. {
  746. struct iscsit_conn *conn = cmd->conn;
  747. int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
  748. /*
  749. * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
  750. */
  751. if (dump_payload)
  752. goto after_immediate_data;
  753. immed_ret = cxgbit_handle_immediate_data(cmd, hdr,
  754. cmd->first_burst_len);
  755. after_immediate_data:
  756. if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
  757. /*
  758. * A PDU/CmdSN carrying Immediate Data passed
  759. * DataCRC, check against ExpCmdSN/MaxCmdSN if
  760. * Immediate Bit is not set.
  761. */
  762. cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
  763. (unsigned char *)hdr,
  764. hdr->cmdsn);
  765. if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
  766. return -1;
  767. if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
  768. target_put_sess_cmd(&cmd->se_cmd);
  769. return 0;
  770. } else if (cmd->unsolicited_data) {
  771. iscsit_set_unsolicited_dataout(cmd);
  772. }
  773. } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
  774. /*
  775. * Immediate Data failed DataCRC and ERL>=1,
  776. * silently drop this PDU and let the initiator
  777. * plug the CmdSN gap.
  778. *
  779. * FIXME: Send Unsolicited NOPIN with reserved
  780. * TTT here to help the initiator figure out
  781. * the missing CmdSN, although they should be
  782. * intelligent enough to determine the missing
  783. * CmdSN and issue a retry to plug the sequence.
  784. */
  785. cmd->i_state = ISTATE_REMOVE;
  786. iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
  787. } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
  788. return -1;
  789. return 0;
  790. }
  791. static int
  792. cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
  793. {
  794. struct iscsit_conn *conn = csk->conn;
  795. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
  796. struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr;
  797. int rc;
  798. bool dump_payload = false;
  799. rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr);
  800. if (rc < 0)
  801. return rc;
  802. if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) &&
  803. (pdu_cb->nr_dfrags == 1))
  804. cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
  805. rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
  806. if (rc < 0)
  807. return 0;
  808. else if (rc > 0)
  809. dump_payload = true;
  810. if (!pdu_cb->dlen)
  811. return 0;
  812. return cxgbit_get_immediate_data(cmd, hdr, dump_payload);
  813. }
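/*
 * Handle a Data-Out PDU. If the payload was already placed by DDP
 * (PDUCBF_RX_DDP_CMP), only the command's write_data_done/DataSN
 * bookkeeping is updated; otherwise the payload is copied from the skb
 * into the command's scatterlist. The DDP scatterlist is unmapped once
 * the last Data-Out bytes for the command have arrived.
 */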
  814. static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
  815. {
  816. struct scatterlist *sg_start;
  817. struct iscsit_conn *conn = csk->conn;
  818. struct iscsit_cmd *cmd = NULL;
  819. struct cxgbit_cmd *ccmd;
  820. struct cxgbi_task_tag_info *ttinfo;
  821. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
  822. struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
  823. u32 data_offset = be32_to_cpu(hdr->offset);
  824. u32 data_len = ntoh24(hdr->dlength);
  825. int rc, sg_nents, sg_off;
  826. bool dcrc_err = false;
  827. if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
  828. u32 offset = be32_to_cpu(hdr->offset);
  829. u32 ddp_data_len;
  830. bool success = false;
  831. cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
  832. if (!cmd)
  833. return 0;
  834. ddp_data_len = offset - cmd->write_data_done;
  835. atomic_long_add(ddp_data_len, &conn->sess->rx_data_octets);
  836. cmd->write_data_done = offset;
  837. cmd->next_burst_len = ddp_data_len;
  838. cmd->data_sn = be32_to_cpu(hdr->datasn);
  839. rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
  840. cmd, data_len, &success);
  841. if (rc < 0)
  842. return rc;
  843. else if (!success)
  844. return 0;
  845. } else {
  846. rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
  847. if (rc < 0)
  848. return rc;
  849. else if (!cmd)
  850. return 0;
  851. }
  852. if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
  853. pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
  854. " DataSN: 0x%08x\n",
  855. hdr->itt, hdr->offset, data_len,
  856. hdr->datasn);
  857. dcrc_err = true;
  858. goto check_payload;
  859. }
  860. pr_debug("DataOut data_len: %u, "
  861. "write_data_done: %u, data_length: %u\n",
  862. data_len, cmd->write_data_done,
  863. cmd->se_cmd.data_length);
  864. if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
  865. u32 skip = data_offset % PAGE_SIZE;
  866. sg_off = data_offset / PAGE_SIZE;
  867. sg_start = &cmd->se_cmd.t_data_sg[sg_off];
  868. sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
  869. cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
  870. }
  871. ccmd = iscsit_priv_cmd(cmd);
  872. ttinfo = &ccmd->ttinfo;
  873. if (ccmd->release && ttinfo->sgl &&
  874. (cmd->se_cmd.data_length == (cmd->write_data_done + data_len))) {
  875. struct cxgbit_device *cdev = csk->com.cdev;
  876. struct cxgbi_ppm *ppm = cdev2ppm(cdev);
  877. dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
  878. DMA_FROM_DEVICE);
  879. ttinfo->nents = 0;
  880. ttinfo->sgl = NULL;
  881. }
  882. check_payload:
  883. rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
  884. if (rc < 0)
  885. return rc;
  886. return 0;
  887. }
  888. static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
  889. {
  890. struct iscsit_conn *conn = csk->conn;
  891. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
  892. struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr;
  893. unsigned char *ping_data = NULL;
  894. u32 payload_length = pdu_cb->dlen;
  895. int ret;
  896. ret = iscsit_setup_nop_out(conn, cmd, hdr);
  897. if (ret < 0)
  898. return 0;
  899. if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
  900. if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
  901. pr_err("Unable to recover from"
  902. " NOPOUT Ping DataCRC failure while in"
  903. " ERL=0.\n");
  904. ret = -1;
  905. goto out;
  906. } else {
  907. /*
  908. * drop this PDU and let the
  909. * initiator plug the CmdSN gap.
  910. */
  911. pr_info("Dropping NOPOUT"
  912. " Command CmdSN: 0x%08x due to"
  913. " DataCRC error.\n", hdr->cmdsn);
  914. ret = 0;
  915. goto out;
  916. }
  917. }
  918. /*
  919. * Handle NOP-OUT payload for traditional iSCSI sockets
  920. */
  921. if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
  922. ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
  923. if (!ping_data) {
  924. pr_err("Unable to allocate memory for"
  925. " NOPOUT ping data.\n");
  926. ret = -1;
  927. goto out;
  928. }
  929. skb_copy_bits(csk->skb, pdu_cb->doffset,
  930. ping_data, payload_length);
  931. ping_data[payload_length] = '\0';
  932. /*
  933. * Attach ping data to struct iscsit_cmd->buf_ptr.
  934. */
  935. cmd->buf_ptr = ping_data;
  936. cmd->buf_ptr_size = payload_length;
  937. pr_debug("Got %u bytes of NOPOUT ping"
  938. " data.\n", payload_length);
  939. pr_debug("Ping Data: \"%s\"\n", ping_data);
  940. }
  941. return iscsit_process_nop_out(conn, cmd, hdr);
  942. out:
  943. if (cmd)
  944. iscsit_free_cmd(cmd, false);
  945. return ret;
  946. }
  947. static int
  948. cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
  949. {
  950. struct iscsit_conn *conn = csk->conn;
  951. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
  952. struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr;
  953. u32 payload_length = pdu_cb->dlen;
  954. int rc;
  955. unsigned char *text_in = NULL;
  956. rc = iscsit_setup_text_cmd(conn, cmd, hdr);
  957. if (rc < 0)
  958. return rc;
  959. if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
  960. if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
  961. pr_err("Unable to recover from"
  962. " Text Data digest failure while in"
  963. " ERL=0.\n");
  964. goto reject;
  965. } else {
  966. /*
  967. * drop this PDU and let the
  968. * initiator plug the CmdSN gap.
  969. */
  970. pr_info("Dropping Text"
  971. " Command CmdSN: 0x%08x due to"
  972. " DataCRC error.\n", hdr->cmdsn);
  973. return 0;
  974. }
  975. }
  976. if (payload_length) {
  977. text_in = kzalloc(payload_length, GFP_KERNEL);
  978. if (!text_in) {
  979. pr_err("Unable to allocate text_in of payload_length: %u\n",
  980. payload_length);
  981. return -ENOMEM;
  982. }
  983. skb_copy_bits(csk->skb, pdu_cb->doffset,
  984. text_in, payload_length);
  985. text_in[payload_length - 1] = '\0';
  986. cmd->text_in_ptr = text_in;
  987. }
  988. return iscsit_process_text_cmd(conn, cmd, hdr);
  989. reject:
  990. return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
  991. pdu_cb->hdr);
  992. }
  993. static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
  994. {
  995. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
  996. struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr;
  997. struct iscsit_conn *conn = csk->conn;
  998. struct iscsit_cmd *cmd = NULL;
  999. u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
  1000. int ret = -EINVAL;
  1001. switch (opcode) {
  1002. case ISCSI_OP_SCSI_CMD:
  1003. cmd = cxgbit_allocate_cmd(csk);
  1004. if (!cmd)
  1005. goto reject;
  1006. ret = cxgbit_handle_scsi_cmd(csk, cmd);
  1007. break;
  1008. case ISCSI_OP_SCSI_DATA_OUT:
  1009. ret = cxgbit_handle_iscsi_dataout(csk);
  1010. break;
  1011. case ISCSI_OP_NOOP_OUT:
  1012. if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
  1013. cmd = cxgbit_allocate_cmd(csk);
  1014. if (!cmd)
  1015. goto reject;
  1016. }
  1017. ret = cxgbit_handle_nop_out(csk, cmd);
  1018. break;
  1019. case ISCSI_OP_SCSI_TMFUNC:
  1020. cmd = cxgbit_allocate_cmd(csk);
  1021. if (!cmd)
  1022. goto reject;
  1023. ret = iscsit_handle_task_mgt_cmd(conn, cmd,
  1024. (unsigned char *)hdr);
  1025. break;
  1026. case ISCSI_OP_TEXT:
  1027. if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
  1028. cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
  1029. if (!cmd)
  1030. goto reject;
  1031. } else {
  1032. cmd = cxgbit_allocate_cmd(csk);
  1033. if (!cmd)
  1034. goto reject;
  1035. }
  1036. ret = cxgbit_handle_text_cmd(csk, cmd);
  1037. break;
  1038. case ISCSI_OP_LOGOUT:
  1039. cmd = cxgbit_allocate_cmd(csk);
  1040. if (!cmd)
  1041. goto reject;
  1042. ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
  1043. if (ret > 0)
  1044. wait_for_completion_timeout(&conn->conn_logout_comp,
  1045. SECONDS_FOR_LOGOUT_COMP
  1046. * HZ);
  1047. break;
  1048. case ISCSI_OP_SNACK:
  1049. ret = iscsit_handle_snack(conn, (unsigned char *)hdr);
  1050. break;
  1051. default:
  1052. pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
  1053. dump_stack();
  1054. break;
  1055. }
  1056. return ret;
  1057. reject:
  1058. return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
  1059. (unsigned char *)hdr);
  1060. return ret;
  1061. }
  1062. static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
  1063. {
  1064. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
  1065. struct iscsit_conn *conn = csk->conn;
  1066. struct iscsi_hdr *hdr = pdu_cb->hdr;
  1067. u8 opcode;
  1068. if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) {
  1069. atomic_long_inc(&conn->sess->conn_digest_errors);
  1070. goto transport_err;
  1071. }
  1072. if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
  1073. goto transport_err;
  1074. opcode = hdr->opcode & ISCSI_OPCODE_MASK;
  1075. if (conn->sess->sess_ops->SessionType &&
  1076. ((!(opcode & ISCSI_OP_TEXT)) ||
  1077. (!(opcode & ISCSI_OP_LOGOUT)))) {
  1078. pr_err("Received illegal iSCSI Opcode: 0x%02x"
  1079. " while in Discovery Session, rejecting.\n", opcode);
  1080. iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
  1081. (unsigned char *)hdr);
  1082. goto transport_err;
  1083. }
  1084. if (cxgbit_target_rx_opcode(csk) < 0)
  1085. goto transport_err;
  1086. return 0;
  1087. transport_err:
  1088. return -1;
  1089. }
  1090. static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
  1091. {
  1092. struct iscsit_conn *conn = csk->conn;
  1093. struct iscsi_login *login = conn->login;
  1094. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
  1095. struct iscsi_login_req *login_req;
  1096. login_req = (struct iscsi_login_req *)login->req;
  1097. memcpy(login_req, pdu_cb->hdr, sizeof(*login_req));
  1098. pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
  1099. " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
  1100. login_req->flags, login_req->itt, login_req->cmdsn,
  1101. login_req->exp_statsn, login_req->cid, pdu_cb->dlen);
  1102. /*
  1103. * Setup the initial iscsi_login values from the leading
  1104. * login request PDU.
  1105. */
  1106. if (login->first_request) {
  1107. login_req = (struct iscsi_login_req *)login->req;
  1108. login->leading_connection = (!login_req->tsih) ? 1 : 0;
  1109. login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
  1110. login_req->flags);
  1111. login->version_min = login_req->min_version;
  1112. login->version_max = login_req->max_version;
  1113. memcpy(login->isid, login_req->isid, 6);
  1114. login->cmd_sn = be32_to_cpu(login_req->cmdsn);
  1115. login->init_task_tag = login_req->itt;
  1116. login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
  1117. login->cid = be16_to_cpu(login_req->cid);
  1118. login->tsih = be16_to_cpu(login_req->tsih);
  1119. }
  1120. if (iscsi_target_check_login_request(conn, login) < 0)
  1121. return -1;
  1122. memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
  1123. skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen);
  1124. return 0;
  1125. }
  1126. static int
  1127. cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx)
  1128. {
  1129. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx);
  1130. int ret;
  1131. cxgbit_rx_pdu_cb(skb) = pdu_cb;
  1132. csk->skb = skb;
  1133. if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) {
  1134. ret = cxgbit_rx_login_pdu(csk);
  1135. set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
  1136. } else {
  1137. ret = cxgbit_rx_opcode(csk);
  1138. }
  1139. return ret;
  1140. }
  1141. static void cxgbit_lro_skb_dump(struct sk_buff *skb)
  1142. {
  1143. struct skb_shared_info *ssi = skb_shinfo(skb);
  1144. struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
  1145. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
  1146. u8 i;
  1147. pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n",
  1148. skb, skb->head, skb->data, skb->len, skb->data_len,
  1149. ssi->nr_frags);
  1150. pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n",
  1151. skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen);
  1152. for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++)
  1153. pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, "
  1154. "frags %u.\n",
  1155. skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq,
  1156. pdu_cb->ddigest, pdu_cb->frags);
  1157. for (i = 0; i < ssi->nr_frags; i++)
  1158. pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
  1159. skb, i, skb_frag_off(&ssi->frags[i]),
  1160. skb_frag_size(&ssi->frags[i]));
  1161. }
  1162. static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
  1163. {
  1164. struct sk_buff *skb = csk->lro_hskb;
  1165. struct skb_shared_info *ssi = skb_shinfo(skb);
  1166. u8 i;
  1167. memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
  1168. for (i = 0; i < ssi->nr_frags; i++)
  1169. put_page(skb_frag_page(&ssi->frags[i]));
  1170. ssi->nr_frags = 0;
  1171. skb->data_len = 0;
  1172. skb->truesize -= skb->len;
  1173. skb->len = 0;
  1174. }
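/*
 * Merge one (possibly partial) PDU from an LRO skb into the per-socket
 * header skb (lro_hskb): take extra references on the header and data
 * frags, and accumulate flags, lengths and digest state in the head
 * pdu_cb until the status for that PDU has been seen.
 */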
  1175. static void
  1176. cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
  1177. {
  1178. struct sk_buff *hskb = csk->lro_hskb;
  1179. struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0);
  1180. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx);
  1181. struct skb_shared_info *hssi = skb_shinfo(hskb);
  1182. struct skb_shared_info *ssi = skb_shinfo(skb);
  1183. unsigned int len = 0;
  1184. if (pdu_cb->flags & PDUCBF_RX_HDR) {
  1185. u8 hfrag_idx = hssi->nr_frags;
  1186. hpdu_cb->flags |= pdu_cb->flags;
  1187. hpdu_cb->seq = pdu_cb->seq;
  1188. hpdu_cb->hdr = pdu_cb->hdr;
  1189. hpdu_cb->hlen = pdu_cb->hlen;
  1190. memcpy(&hssi->frags[hfrag_idx], &ssi->frags[pdu_cb->hfrag_idx],
  1191. sizeof(skb_frag_t));
  1192. get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
  1193. hssi->nr_frags++;
  1194. hpdu_cb->frags++;
  1195. hpdu_cb->hfrag_idx = hfrag_idx;
  1196. len = skb_frag_size(&hssi->frags[hfrag_idx]);
  1197. hskb->len += len;
  1198. hskb->data_len += len;
  1199. hskb->truesize += len;
  1200. }
  1201. if (pdu_cb->flags & PDUCBF_RX_DATA) {
  1202. u8 dfrag_idx = hssi->nr_frags, i;
  1203. hpdu_cb->flags |= pdu_cb->flags;
  1204. hpdu_cb->dfrag_idx = dfrag_idx;
  1205. len = 0;
  1206. for (i = 0; i < pdu_cb->nr_dfrags; dfrag_idx++, i++) {
  1207. memcpy(&hssi->frags[dfrag_idx],
  1208. &ssi->frags[pdu_cb->dfrag_idx + i],
  1209. sizeof(skb_frag_t));
  1210. get_page(skb_frag_page(&hssi->frags[dfrag_idx]));
  1211. len += skb_frag_size(&hssi->frags[dfrag_idx]);
  1212. hssi->nr_frags++;
  1213. hpdu_cb->frags++;
  1214. }
  1215. hpdu_cb->dlen = pdu_cb->dlen;
  1216. hpdu_cb->doffset = hpdu_cb->hlen;
  1217. hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags;
  1218. hskb->len += len;
  1219. hskb->data_len += len;
  1220. hskb->truesize += len;
  1221. }
  1222. if (pdu_cb->flags & PDUCBF_RX_STATUS) {
  1223. hpdu_cb->flags |= pdu_cb->flags;
  1224. if (hpdu_cb->flags & PDUCBF_RX_DATA)
  1225. hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD;
  1226. hpdu_cb->ddigest = pdu_cb->ddigest;
  1227. hpdu_cb->pdulen = pdu_cb->pdulen;
  1228. }
  1229. }
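/*
 * Process every PDU carried in an LRO skb. A PDU whose first part was
 * seen in an earlier skb is completed through lro_hskb first; a trailing
 * incomplete PDU is merged into lro_hskb so the next skb can finish it.
 */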
  1230. static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
  1231. {
  1232. struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
  1233. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
  1234. u8 pdu_idx = 0, last_idx = 0;
  1235. int ret = 0;
  1236. if (!pdu_cb->complete) {
  1237. cxgbit_lro_skb_merge(csk, skb, 0);
  1238. if (pdu_cb->flags & PDUCBF_RX_STATUS) {
  1239. struct sk_buff *hskb = csk->lro_hskb;
  1240. ret = cxgbit_process_iscsi_pdu(csk, hskb, 0);
  1241. cxgbit_lro_hskb_reset(csk);
  1242. if (ret < 0)
  1243. goto out;
  1244. }
  1245. pdu_idx = 1;
  1246. }
  1247. if (lro_cb->pdu_idx)
  1248. last_idx = lro_cb->pdu_idx - 1;
  1249. for (; pdu_idx <= last_idx; pdu_idx++) {
  1250. ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
  1251. if (ret < 0)
  1252. goto out;
  1253. }
  1254. if ((!lro_cb->complete) && lro_cb->pdu_idx)
  1255. cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);
  1256. out:
  1257. return ret;
  1258. }
  1259. static int cxgbit_t5_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
  1260. {
  1261. struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
  1262. struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
  1263. int ret = -1;
  1264. if ((pdu_cb->flags & PDUCBF_RX_HDR) &&
  1265. (pdu_cb->seq != csk->rcv_nxt)) {
  1266. pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n",
  1267. csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
  1268. cxgbit_lro_skb_dump(skb);
  1269. return ret;
  1270. }
  1271. csk->rcv_nxt += lro_cb->pdu_totallen;
  1272. ret = cxgbit_process_lro_skb(csk, skb);
  1273. csk->rx_credits += lro_cb->pdu_totallen;
  1274. if (csk->rx_credits >= (csk->rcv_win / 4))
  1275. cxgbit_rx_data_ack(csk);
  1276. return ret;
  1277. }
  1278. static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
  1279. {
  1280. struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
  1281. int ret;
  1282. ret = cxgbit_process_lro_skb(csk, skb);
  1283. if (ret)
  1284. return ret;
  1285. csk->rx_credits += lro_cb->pdu_totallen;
  1286. if (csk->rx_credits >= csk->rcv_win) {
  1287. csk->rx_credits = 0;
  1288. cxgbit_rx_data_ack(csk);
  1289. }
  1290. return 0;
  1291. }
  1292. static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
  1293. {
  1294. struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
  1295. int ret = -1;
  1296. if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) {
  1297. if (is_t5(lldi->adapter_type))
  1298. ret = cxgbit_t5_rx_lro_skb(csk, skb);
  1299. else
  1300. ret = cxgbit_rx_lro_skb(csk, skb);
  1301. }
  1302. __kfree_skb(skb);
  1303. return ret;
  1304. }
  1305. static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
  1306. {
  1307. spin_lock_bh(&csk->rxq.lock);
  1308. if (skb_queue_len(&csk->rxq)) {
  1309. skb_queue_splice_init(&csk->rxq, rxq);
  1310. spin_unlock_bh(&csk->rxq.lock);
  1311. return true;
  1312. }
  1313. spin_unlock_bh(&csk->rxq.lock);
  1314. return false;
  1315. }
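/*
 * Wait until RX skbs show up on csk->rxq (or a signal is pending), then
 * run each one through cxgbit_rx_skb(). Used by both the login path and
 * the connection RX loop.
 */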
  1316. static int cxgbit_wait_rxq(struct cxgbit_sock *csk)
  1317. {
  1318. struct sk_buff *skb;
  1319. struct sk_buff_head rxq;
  1320. skb_queue_head_init(&rxq);
  1321. wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));
  1322. if (signal_pending(current))
  1323. goto out;
  1324. while ((skb = __skb_dequeue(&rxq))) {
  1325. if (cxgbit_rx_skb(csk, skb))
  1326. goto out;
  1327. }
  1328. return 0;
  1329. out:
  1330. __skb_queue_purge(&rxq);
  1331. return -1;
  1332. }
  1333. int cxgbit_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
  1334. {
  1335. struct cxgbit_sock *csk = conn->context;
  1336. int ret = -1;
  1337. while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) {
  1338. ret = cxgbit_wait_rxq(csk);
  1339. if (ret) {
  1340. clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
  1341. break;
  1342. }
  1343. }
  1344. return ret;
  1345. }
  1346. void cxgbit_get_rx_pdu(struct iscsit_conn *conn)
  1347. {
  1348. struct cxgbit_sock *csk = conn->context;
  1349. while (!kthread_should_stop()) {
  1350. iscsit_thread_check_cpumask(conn, current, 0);
  1351. if (cxgbit_wait_rxq(csk))
  1352. return;
  1353. }
  1354. }