tfc_io.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010 Cisco Systems, Inc.
 *
 * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
 *
 * Copyright (c) 2007 Intel Corporation. All rights reserved.
 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
 * Copyright (c) 2008 Mike Christie
 * Copyright (c) 2009 Rising Tide, Inc.
 * Copyright (c) 2009 Linux-iSCSI.org
 * Copyright (c) 2009 Nicholas A. Bellinger <[email protected]>
 */
/* XXX TBD some includes may be extraneous */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/libfc.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_fc.h"
/*
 * Deliver read data back to initiator.
 * XXX TBD handle resource problems later.
 */
int ft_queue_data_in(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct scatterlist *sg = NULL;
	size_t remaining;
	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
	u32 mem_off = 0;
	u32 fh_off = 0;
	u32 frame_off = 0;
	size_t frame_len = 0;
	size_t mem_len = 0;
	size_t tlen;
	size_t off_in_page;
	struct page *page = NULL;
	int use_sg;
	int error;
	void *page_addr;
	void *from;
	void *to = NULL;

	if (cmd->aborted)
		return 0;
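
	/*
	 * A command already carrying TASK SET FULL status (e.g. set by an
	 * earlier failed frame send below) skips the data phase entirely;
	 * report the status to the initiator now.
	 */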
	if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
		goto queue_status;

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	cmd->seq = fc_seq_start_next(cmd->seq);
	remaining = se_cmd->data_length;

	/*
	 * Set up to use the first mem list entry, unless there is no data.
	 */
	BUG_ON(remaining && !se_cmd->t_data_sg);
	if (remaining) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4);
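
	/*
	 * Build and send data frames until all read data has been queued.
	 * Each pass either attaches scatterlist pages to the skb directly
	 * (use_sg) or memcpy's the payload into a linear frame buffer.
	 */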
	while (remaining) {
		struct fc_seq *seq = cmd->seq;

		if (!seq) {
			pr_debug("%s: Command aborted, xid 0x%x\n",
				 __func__, ep->xid);
			break;
		}
		if (!mem_len) {
			sg = sg_next(sg);
			mem_len = min((size_t)sg->length, remaining);
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (!frame_len) {
			/*
			 * If the lport has the Large Send Offload (LSO)
			 * capability, allow 'frame_len' to be as big as
			 * 'lso_max'; otherwise limit it to the session's
			 * max frame size.
			 */
			frame_len = (lport->seq_offload) ? lport->lso_max :
							   cmd->sess->max_frame;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp)
				return -ENOMEM;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
			frame_off += frame_len;
			/*
			 * Set the frame's max payload, which the base driver
			 * uses to tell the HW the maximum frame size, so that
			 * the HW can fragment appropriately based on the
			 * "gso_max_size" of the underlying netdev.
			 */
			fr_max_payload(fp) = cmd->sess->max_frame;
		}
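
		/* Copy at most what is left in this sg entry and in this frame. */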
		tlen = min(mem_len, frame_len);
		if (use_sg) {
			off_in_page = mem_off;
			BUG_ON(!page);
			get_page(page);
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, off_in_page, tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize += page_size(page);
		} else {
			BUG_ON(!page);
			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
			page_addr = from;
			from += offset_in_page(mem_off);
			tlen = min(tlen, (size_t)(PAGE_SIZE -
						  offset_in_page(mem_off)));
			memcpy(to, from, tlen);
			kunmap_atomic(page_addr);
			to += tlen;
		}
		mem_off += tlen;
		mem_len -= tlen;
		frame_len -= tlen;
		remaining -= tlen;
		if (frame_len &&
		    (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
			continue;
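
		/*
		 * The frame is full, its page-fragment slots are exhausted,
		 * or no data remains: fill in the FC header and hand the
		 * frame to libfc for sending.
		 */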
		if (!remaining)
			f_ctl |= FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_off);
		error = fc_seq_send(lport, seq, fp);
		if (error) {
			pr_info_ratelimited("%s: Failed to send frame %p, "
					    "xid <0x%x>, remaining %zu, "
					    "lso_max <0x%x>\n",
					    __func__, fp, ep->xid,
					    remaining, lport->lso_max);
			/*
			 * Go ahead and set TASK_SET_FULL status, ignoring
			 * the rest of the DataIN, and immediately attempt to
			 * send the response via ft_queue_status() in order
			 * to notify the initiator that it should reduce its
			 * per-LUN queue_depth.
			 */
			se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
			break;
		}
	}
queue_status:
	return ft_queue_status(se_cmd);
}
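
/*
 * Work item: pass a command whose write data has fully arrived to the
 * target core for execution.
 */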
static void ft_execute_work(struct work_struct *work)
{
	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);

	target_execute_cmd(&cmd->se_cmd);
}

/*
 * Receive write data frame.
 */
void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct fc_seq *seq = cmd->seq;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct fc_frame_header *fh;
	struct scatterlist *sg = NULL;
	u32 mem_off = 0;
	u32 rel_off;
	size_t frame_len;
	size_t mem_len = 0;
	size_t tlen;
	struct page *page = NULL;
	void *page_addr;
	void *from;
	void *to;
	u32 f_ctl;
	void *buf;

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);
	if (!(f_ctl & FC_FC_REL_OFF))
		goto drop;

	ep = fc_seq_exch(seq);
	lport = ep->lp;
	if (cmd->was_ddp_setup) {
		BUG_ON(!lport);
		/*
		 * Since DDP (Large Rx offload) was set up for this request,
		 * the payload is expected to be copied directly to user
		 * buffers.
		 */
		buf = fc_frame_payload_get(fp, 1);
		if (buf)
			pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
			       "cmd->sg_cnt 0x%x. DDP was set up, so no "
			       "frame with payload was expected. The frame "
			       "will be dropped if the 'Sequence Initiative' "
			       "bit in f_ctl is not set\n",
			       __func__, ep->xid, f_ctl,
			       se_cmd->t_data_sg, se_cmd->t_data_nents);
		/*
		 * Invalidate the HW DDP context if it was set up for this
		 * command. Invalidation of the HW DDP context is required
		 * in both situations (success and error).
		 */
		ft_invl_hw_context(cmd);
		/*
		 * If the "Sequence Initiative (TSI)" bit is set in f_ctl,
		 * the last write data frame was received successfully: the
		 * payload was posted directly to the user buffer and only
		 * the last frame's header was posted to the receive queue.
		 *
		 * If the "Sequence Initiative (TSI)" bit is not set, it
		 * indicates an error condition w.r.t. DDP, so drop the
		 * packet and let an explicit ABORT from the other end of
		 * the exchange, or the exchange timer, trigger recovery.
		 */
		if (f_ctl & FC_FC_SEQ_INIT)
			goto last_frame;
		else
			goto drop;
	}
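
	/*
	 * Non-DDP path: locate the frame's payload within the command's
	 * data buffer via the relative offset, clamping the length to the
	 * expected transfer size.
	 */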
	rel_off = ntohl(fh->fh_parm_offset);
	frame_len = fr_len(fp);
	if (frame_len <= sizeof(*fh))
		goto drop;
	frame_len -= sizeof(*fh);
	from = fc_frame_payload_get(fp, 0);
	if (rel_off >= se_cmd->data_length)
		goto drop;
	if (frame_len + rel_off > se_cmd->data_length)
		frame_len = se_cmd->data_length - rel_off;

	/*
	 * Set up to use the first mem list entry, unless there is no data.
	 */
	BUG_ON(frame_len && !se_cmd->t_data_sg);
	if (frame_len) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}
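
	/*
	 * Walk the scatterlist, skipping rel_off bytes, and copy the
	 * payload into the data buffer one page at a time.
	 */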
	while (frame_len) {
		if (!mem_len) {
			sg = sg_next(sg);
			mem_len = sg->length;
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (rel_off >= mem_len) {
			rel_off -= mem_len;
			mem_len = 0;
			continue;
		}
		mem_off += rel_off;
		mem_len -= rel_off;
		rel_off = 0;

		tlen = min(mem_len, frame_len);

		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
		page_addr = to;
		to += offset_in_page(mem_off);
		tlen = min(tlen, (size_t)(PAGE_SIZE -
					  offset_in_page(mem_off)));
		memcpy(to, from, tlen);
		kunmap_atomic(page_addr);

		from += tlen;
		frame_len -= tlen;
		mem_off += tlen;
		mem_len -= tlen;
		cmd->write_data_len += tlen;
	}
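
	/*
	 * Once the entire write buffer has been received, queue the
	 * command for backend execution.
	 */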
last_frame:
	if (cmd->write_data_len == se_cmd->data_length) {
		INIT_WORK(&cmd->work, ft_execute_work);
		queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
	}
drop:
	fc_frame_free(fp);
}

/*
 * Handle and clean up any HW-specific resources on
 * received ABORTs, errors, or timeouts.
 */
void ft_invl_hw_context(struct ft_cmd *cmd)
{
	struct fc_seq *seq;
	struct fc_exch *ep = NULL;
	struct fc_lport *lport = NULL;

	BUG_ON(!cmd);
	seq = cmd->seq;

	/* Clean up the DDP context in HW if DDP was set up */
	if (cmd->was_ddp_setup && seq) {
		ep = fc_seq_exch(seq);
		if (ep) {
			lport = ep->lp;
			if (lport && (ep->xid <= lport->lro_xid)) {
				/*
				 * "ddp_done" triggers invalidation of the
				 * HW-specific DDP context.
				 */
				cmd->write_data_len = lport->tt.ddp_done(lport,
									 ep->xid);
				/*
				 * Reset the flag to indicate that the HW DDP
				 * context has been invalidated, to avoid
				 * re-invalidation of the same context (the
				 * context is identified by ep->xid).
				 */
				cmd->was_ddp_setup = 0;
			}
		}
	}
}