efct_scsi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"

#define enable_tsend_auto_resp(efct)	1
#define enable_treceive_auto_resp(efct)	0

#define SCSI_IOFMT "[%04x][i:%04x t:%04x h:%04x]"

#define scsi_io_printf(io, fmt, ...) \
	efc_log_debug(io->efct, "[%s]" SCSI_IOFMT fmt, \
		io->node->display_name, io->instance_index,\
		io->init_task_tag, io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)

#define EFCT_LOG_ENABLE_SCSI_TRACE(efct) \
	(((efct) != NULL) ? (((efct)->logmask & (1U << 2)) != 0) : 0)

#define scsi_io_trace(io, fmt, ...) \
	do { \
		if (EFCT_LOG_ENABLE_SCSI_TRACE(io->efct)) \
			scsi_io_printf(io, fmt, ##__VA_ARGS__); \
	} while (0)
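
/*
 * Example (illustrative values): with efct->logmask = 0x4, bit 2 is set,
 * so scsi_io_trace(io, "xfer %llu\n", len) expands to a scsi_io_printf()
 * call; with bit 2 clear, the do/while body is skipped and the trace
 * costs only the mask test.
 */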

struct efct_io *
efct_scsi_io_alloc(struct efct_node *node)
{
	struct efct *efct;
	struct efct_xport *xport;
	struct efct_io *io;
	unsigned long flags;

	efct = node->efct;
	xport = efct->xport;

	io = efct_io_pool_io_alloc(efct->xport->io_pool);
	if (!io) {
		efc_log_err(efct, "IO alloc Failed\n");
		atomic_add_return(1, &xport->io_alloc_failed_count);
		return NULL;
	}

	/* initialize refcount */
	kref_init(&io->ref);
	io->release = _efct_scsi_io_free;

	/* set generic fields */
	io->efct = efct;
	io->node = node;
	kref_get(&node->ref);

	/* set type and name */
	io->io_type = EFCT_IO_TYPE_IO;
	io->display_name = "scsi_io";

	io->cmd_ini = false;
	io->cmd_tgt = true;

	/* Add to node's active_ios list */
	INIT_LIST_HEAD(&io->list_entry);
	spin_lock_irqsave(&node->active_ios_lock, flags);
	list_add(&io->list_entry, &node->active_ios);
	spin_unlock_irqrestore(&node->active_ios_lock, flags);

	return io;
}

void
_efct_scsi_io_free(struct kref *arg)
{
	struct efct_io *io = container_of(arg, struct efct_io, ref);
	struct efct *efct = io->efct;
	struct efct_node *node = io->node;
	unsigned long flags = 0;

	scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);

	if (io->io_free) {
		efc_log_err(efct, "IO already freed.\n");
		return;
	}

	spin_lock_irqsave(&node->active_ios_lock, flags);
	list_del_init(&io->list_entry);
	spin_unlock_irqrestore(&node->active_ios_lock, flags);

	kref_put(&node->ref, node->release);
	io->node = NULL;
	efct_io_pool_io_free(efct->xport->io_pool, io);
}

void
efct_scsi_io_free(struct efct_io *io)
{
	scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
	WARN_ON(!refcount_read(&io->ref.refcount));
	kref_put(&io->ref, io->release);
}
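
/*
 * Reference lifecycle sketch (derived from the two functions above):
 * efct_scsi_io_alloc() starts io->ref at 1 via kref_init();
 * efct_scsi_io_free() drops that reference, and when the count hits
 * zero kref_put() invokes _efct_scsi_io_free(), which unlinks the IO
 * from the node's active_ios list, releases the node reference taken
 * at alloc time, and returns the IO to the pool.
 */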

static void
efct_target_io_cb(struct efct_hw_io *hio, u32 length, int status,
		  u32 ext_status, void *app)
{
	u32 flags = 0;
	struct efct_io *io = app;
	struct efct *efct;
	enum efct_scsi_io_status scsi_stat = EFCT_SCSI_STATUS_GOOD;
	efct_scsi_io_cb_t cb;

	if (!io || !io->efct) {
		pr_err("%s: IO can not be NULL\n", __func__);
		return;
	}

	scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);

	efct = io->efct;
	io->transferred += length;

	if (!io->scsi_tgt_cb) {
		efct_scsi_check_pending(efct);
		return;
	}

	/* Call target server completion */
	cb = io->scsi_tgt_cb;

	/* Clear the callback before invoking the callback */
	io->scsi_tgt_cb = NULL;

	/* if status was good, and auto-good-response was set,
	 * then callback target-server with IO_CMPL_RSP_SENT,
	 * otherwise send IO_CMPL
	 */
	if (status == 0 && io->auto_resp)
		flags |= EFCT_SCSI_IO_CMPL_RSP_SENT;
	else
		flags |= EFCT_SCSI_IO_CMPL;

	switch (status) {
	case SLI4_FC_WCQE_STATUS_SUCCESS:
		scsi_stat = EFCT_SCSI_STATUS_GOOD;
		break;
	case SLI4_FC_WCQE_STATUS_DI_ERROR:
		if (ext_status & SLI4_FC_DI_ERROR_GE)
			scsi_stat = EFCT_SCSI_STATUS_DIF_GUARD_ERR;
		else if (ext_status & SLI4_FC_DI_ERROR_AE)
			scsi_stat = EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR;
		else if (ext_status & SLI4_FC_DI_ERROR_RE)
			scsi_stat = EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR;
		else
			scsi_stat = EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR;
		break;
	case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
		switch (ext_status) {
		case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET:
		case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
			scsi_stat = EFCT_SCSI_STATUS_ABORTED;
			break;
		case SLI4_FC_LOCAL_REJECT_INVALID_RPI:
			scsi_stat = EFCT_SCSI_STATUS_NEXUS_LOST;
			break;
		case SLI4_FC_LOCAL_REJECT_NO_XRI:
			scsi_stat = EFCT_SCSI_STATUS_NO_IO;
			break;
		default:
			/* we have seen 0x0d (TX_DMA_FAILED err) */
			scsi_stat = EFCT_SCSI_STATUS_ERROR;
			break;
		}
		break;
	case SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT:
		/* target IO timed out */
		scsi_stat = EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED;
		break;
	case SLI4_FC_WCQE_STATUS_SHUTDOWN:
		/* Target IO cancelled by HW */
		scsi_stat = EFCT_SCSI_STATUS_SHUTDOWN;
		break;
	default:
		scsi_stat = EFCT_SCSI_STATUS_ERROR;
		break;
	}

	cb(io, scsi_stat, flags, io->scsi_tgt_cb_arg);

	efct_scsi_check_pending(efct);
}

static int
efct_scsi_build_sgls(struct efct_hw *hw, struct efct_hw_io *hio,
		     struct efct_scsi_sgl *sgl, u32 sgl_count,
		     enum efct_hw_io_type type)
{
	int rc;
	u32 i;
	struct efct *efct = hw->os;

	/* Initialize HW SGL */
	rc = efct_hw_io_init_sges(hw, hio, type);
	if (rc) {
		efc_log_err(efct, "efct_hw_io_init_sges failed: %d\n", rc);
		return -EIO;
	}

	for (i = 0; i < sgl_count; i++) {
		/* Add data SGE */
		rc = efct_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len);
		if (rc) {
			efc_log_err(efct, "add sge failed cnt=%d rc=%d\n",
				    sgl_count, rc);
			return rc;
		}
	}

	return 0;
}

static void efc_log_sgl(struct efct_io *io)
{
	struct efct_hw_io *hio = io->hio;
	struct sli4_sge *data = NULL;
	u32 *dword = NULL;
	u32 i;
	u32 n_sge;

	scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n",
		      upper_32_bits(hio->def_sgl.phys),
		      lower_32_bits(hio->def_sgl.phys));
	n_sge = (hio->sgl == &hio->def_sgl) ? hio->n_sge : hio->def_sgl_count;
	for (i = 0, data = hio->def_sgl.virt; i < n_sge; i++, data++) {
		dword = (u32 *)data;

		scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
			      i, dword[0], dword[1], dword[2], dword[3]);

		if (dword[2] & (1U << 31))
			break;
	}
}

static void
efct_scsi_check_pending_async_cb(struct efct_hw *hw, int status,
				 u8 *mqe, void *arg)
{
	struct efct_io *io = arg;

	if (io) {
		efct_hw_done_t cb = io->hw_cb;

		if (!io->hw_cb)
			return;

		io->hw_cb = NULL;
		(cb)(io->hio, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io);
	}
}

static int
efct_scsi_io_dispatch_hw_io(struct efct_io *io, struct efct_hw_io *hio)
{
	int rc = 0;
	struct efct *efct = io->efct;

	/* Got a HW IO;
	 * update ini/tgt_task_tag with HW IO info and dispatch
	 */
	io->hio = hio;
	if (io->cmd_tgt)
		io->tgt_task_tag = hio->indicator;
	else if (io->cmd_ini)
		io->init_task_tag = hio->indicator;
	io->hw_tag = hio->reqtag;

	hio->eq = io->hw_priv;

	/* Copy WQ steering */
	switch (io->wq_steering) {
	case EFCT_SCSI_WQ_STEERING_CLASS >> EFCT_SCSI_WQ_STEERING_SHIFT:
		hio->wq_steering = EFCT_HW_WQ_STEERING_CLASS;
		break;
	case EFCT_SCSI_WQ_STEERING_REQUEST >> EFCT_SCSI_WQ_STEERING_SHIFT:
		hio->wq_steering = EFCT_HW_WQ_STEERING_REQUEST;
		break;
	case EFCT_SCSI_WQ_STEERING_CPU >> EFCT_SCSI_WQ_STEERING_SHIFT:
		hio->wq_steering = EFCT_HW_WQ_STEERING_CPU;
		break;
	}

	switch (io->io_type) {
	case EFCT_IO_TYPE_IO:
		rc = efct_scsi_build_sgls(&efct->hw, io->hio,
					  io->sgl, io->sgl_count, io->hio_type);
		if (rc)
			break;

		if (EFCT_LOG_ENABLE_SCSI_TRACE(efct))
			efc_log_sgl(io);

		if (io->app_id)
			io->iparam.fcp_tgt.app_id = io->app_id;

		io->iparam.fcp_tgt.vpi = io->node->vpi;
		io->iparam.fcp_tgt.rpi = io->node->rpi;
		io->iparam.fcp_tgt.s_id = io->node->port_fc_id;
		io->iparam.fcp_tgt.d_id = io->node->node_fc_id;
		io->iparam.fcp_tgt.xmit_len = io->wire_len;

		rc = efct_hw_io_send(&io->efct->hw, io->hio_type, io->hio,
				     &io->iparam, io->hw_cb, io);
		break;
	default:
		scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
		rc = -EIO;
		break;
	}
	return rc;
}
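
/*
 * Note on the steering mapping above: callers encode a steering hint in
 * the SCSI flags word; the data-path entry points later in this file
 * extract it with
 * (flags & EFCT_SCSI_WQ_STEERING_MASK) >> EFCT_SCSI_WQ_STEERING_SHIFT,
 * and the first switch above translates each value to the matching
 * EFCT_HW_WQ_STEERING_* constant on the HW IO.
 */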

static int
efct_scsi_io_dispatch_no_hw_io(struct efct_io *io)
{
	int rc;

	switch (io->io_type) {
	case EFCT_IO_TYPE_ABORT: {
		struct efct_hw_io *hio_to_abort = NULL;

		hio_to_abort = io->io_to_abort->hio;

		if (!hio_to_abort) {
			/*
			 * If "IO to abort" does not have an
			 * associated HW IO, immediately make callback with
			 * success. The command must have been sent to
			 * the backend, but the data phase has not yet
			 * started, so we don't have a HW IO.
			 *
			 * Note: since the backend shims should be
			 * taking a reference on io_to_abort, it should not
			 * be possible to have been completed and freed by
			 * the backend before the abort got here.
			 */
			scsi_io_printf(io, "IO: not active\n");
			((efct_hw_done_t)io->hw_cb)(io->hio, 0,
					SLI4_FC_WCQE_STATUS_SUCCESS, 0, io);
			rc = 0;
			break;
		}

		/* HW IO is valid, abort it */
		scsi_io_printf(io, "aborting\n");
		rc = efct_hw_io_abort(&io->efct->hw, hio_to_abort,
				      io->send_abts, io->hw_cb, io);
		if (rc) {
			int status = SLI4_FC_WCQE_STATUS_SUCCESS;
			efct_hw_done_t cb = io->hw_cb;

			if (rc != -ENOENT && rc != -EINPROGRESS) {
				status = -1;
				scsi_io_printf(io, "Failed to abort IO rc=%d\n",
					       rc);
			}
			cb(io->hio, 0, status, 0, io);
			rc = 0;
		}
		break;
	}
	default:
		scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
		rc = -EIO;
		break;
	}
	return rc;
}

static struct efct_io *
efct_scsi_dispatch_pending(struct efct *efct)
{
	struct efct_xport *xport = efct->xport;
	struct efct_io *io = NULL;
	struct efct_hw_io *hio;
	unsigned long flags = 0;
	int status;

	spin_lock_irqsave(&xport->io_pending_lock, flags);

	if (!list_empty(&xport->io_pending_list)) {
		io = list_first_entry(&xport->io_pending_list, struct efct_io,
				      io_pending_link);
		list_del_init(&io->io_pending_link);
	}

	if (!io) {
		spin_unlock_irqrestore(&xport->io_pending_lock, flags);
		return NULL;
	}

	if (io->io_type == EFCT_IO_TYPE_ABORT) {
		hio = NULL;
	} else {
		hio = efct_hw_io_alloc(&efct->hw);
		if (!hio) {
			/*
			 * No HW IO available. Put IO back on
			 * the front of pending list
			 */
			list_add(&io->io_pending_link, &xport->io_pending_list);
			io = NULL;
		} else {
			hio->eq = io->hw_priv;
		}
	}

	/* Must drop the lock before dispatching the IO */
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);

	if (!io)
		return NULL;

	/*
	 * We pulled an IO off the pending list,
	 * and either got an HW IO or don't need one
	 */
	atomic_sub_return(1, &xport->io_pending_count);
	if (!hio)
		status = efct_scsi_io_dispatch_no_hw_io(io);
	else
		status = efct_scsi_io_dispatch_hw_io(io, hio);
	if (status) {
		/*
		 * Invoke the HW callback, but do so in the
		 * separate execution context, provided by the
		 * NOP mailbox completion processing context
		 * by using efct_hw_async_call()
		 */
		if (efct_hw_async_call(&efct->hw,
				       efct_scsi_check_pending_async_cb, io)) {
			efc_log_debug(efct, "call hw async failed\n");
		}
	}

	return io;
}

void
efct_scsi_check_pending(struct efct *efct)
{
	struct efct_xport *xport = efct->xport;
	struct efct_io *io = NULL;
	int count = 0;
	unsigned long flags = 0;
	int dispatch = 0;

	/*
	 * Guard against recursion: atomic_add_return() yields the
	 * post-increment value, so only the first caller sees 1.
	 */
	if (atomic_add_return(1, &xport->io_pending_recursing) != 1) {
		/* This function is already running. Decrement and return. */
		atomic_sub_return(1, &xport->io_pending_recursing);
		return;
	}

	while (efct_scsi_dispatch_pending(efct))
		count++;

	if (count) {
		atomic_sub_return(1, &xport->io_pending_recursing);
		return;
	}

	/*
	 * If nothing was removed from the list,
	 * we might be in a case where we need to abort an
	 * active IO and the abort is on the pending list.
	 * Look for an abort we can dispatch.
	 */
	spin_lock_irqsave(&xport->io_pending_lock, flags);
	list_for_each_entry(io, &xport->io_pending_list, io_pending_link) {
		if (io->io_type == EFCT_IO_TYPE_ABORT && io->io_to_abort->hio) {
			/* This IO has a HW IO, so it is
			 * active. Dispatch the abort.
			 */
			dispatch = 1;
			list_del_init(&io->io_pending_link);
			atomic_sub_return(1, &xport->io_pending_count);
			break;
		}
	}
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);

	if (dispatch) {
		if (efct_scsi_io_dispatch_no_hw_io(io)) {
			if (efct_hw_async_call(&efct->hw,
					       efct_scsi_check_pending_async_cb,
					       io)) {
				efc_log_debug(efct, "hw async failed\n");
			}
		}
	}
	atomic_sub_return(1, &xport->io_pending_recursing);
}

int
efct_scsi_io_dispatch(struct efct_io *io, void *cb)
{
	struct efct_hw_io *hio;
	struct efct *efct = io->efct;
	struct efct_xport *xport = efct->xport;
	unsigned long flags = 0;

	io->hw_cb = cb;

	/*
	 * if this IO already has a HW IO, then this is
	 * not the first phase of the IO. Send it to the HW.
	 */
	if (io->hio)
		return efct_scsi_io_dispatch_hw_io(io, io->hio);

	/*
	 * We don't already have a HW IO associated with the IO. First check
	 * the pending list. If not empty, add IO to the tail and process the
	 * pending list.
	 */
	spin_lock_irqsave(&xport->io_pending_lock, flags);
	if (!list_empty(&xport->io_pending_list)) {
		/*
		 * If this is a low latency request,
		 * put it at the front of the IO pending
		 * queue, otherwise put it at the end of the queue.
		 */
		if (io->low_latency) {
			INIT_LIST_HEAD(&io->io_pending_link);
			list_add(&io->io_pending_link, &xport->io_pending_list);
		} else {
			INIT_LIST_HEAD(&io->io_pending_link);
			list_add_tail(&io->io_pending_link,
				      &xport->io_pending_list);
		}
		spin_unlock_irqrestore(&xport->io_pending_lock, flags);
		atomic_add_return(1, &xport->io_pending_count);
		atomic_add_return(1, &xport->io_total_pending);

		/* process pending list */
		efct_scsi_check_pending(efct);
		return 0;
	}
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);

	/*
	 * We don't have a HW IO associated with the IO and there's nothing
	 * on the pending list. Attempt to allocate a HW IO and dispatch it.
	 */
	hio = efct_hw_io_alloc(&io->efct->hw);
	if (!hio) {
		/* Couldn't get a HW IO. Save this IO on the pending list */
		spin_lock_irqsave(&xport->io_pending_lock, flags);
		INIT_LIST_HEAD(&io->io_pending_link);
		list_add_tail(&io->io_pending_link, &xport->io_pending_list);
		spin_unlock_irqrestore(&xport->io_pending_lock, flags);

		atomic_add_return(1, &xport->io_total_pending);
		atomic_add_return(1, &xport->io_pending_count);
		return 0;
	}

	/* We successfully allocated a HW IO; dispatch to HW */
	return efct_scsi_io_dispatch_hw_io(io, hio);
}
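
/*
 * Flow summary (for orientation): efct_scsi_io_dispatch() sends an IO
 * straight to the hardware when a HW IO is available, otherwise it
 * queues the IO on xport->io_pending_list. efct_scsi_check_pending()
 * then drains that list via efct_scsi_dispatch_pending(), and is
 * re-invoked from the completion callbacks above, so queued IOs make
 * progress as HW IOs are returned to the pool.
 */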

int
efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb)
{
	struct efct *efct = io->efct;
	struct efct_xport *xport = efct->xport;
	unsigned long flags = 0;

	io->hw_cb = cb;

	/*
	 * For aborts, we don't need a HW IO, but we still want
	 * to pass through the pending list to preserve ordering.
	 * Thus, if the pending list is not empty, add this abort
	 * to the pending list and process the pending list.
	 */
	spin_lock_irqsave(&xport->io_pending_lock, flags);
	if (!list_empty(&xport->io_pending_list)) {
		INIT_LIST_HEAD(&io->io_pending_link);
		list_add_tail(&io->io_pending_link, &xport->io_pending_list);
		spin_unlock_irqrestore(&xport->io_pending_lock, flags);
		atomic_add_return(1, &xport->io_pending_count);
		atomic_add_return(1, &xport->io_total_pending);

		/* process pending list */
		efct_scsi_check_pending(efct);
		return 0;
	}
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);

	/* nothing on pending list, dispatch abort */
	return efct_scsi_io_dispatch_no_hw_io(io);
}

static inline int
efct_scsi_xfer_data(struct efct_io *io, u32 flags,
		    struct efct_scsi_sgl *sgl, u32 sgl_count, u64 xwire_len,
		    enum efct_hw_io_type type, int enable_ar,
		    efct_scsi_io_cb_t cb, void *arg)
{
	struct efct *efct;
	size_t residual = 0;

	io->sgl_count = sgl_count;

	efct = io->efct;

	scsi_io_trace(io, "%s wire_len %llu\n",
		      (type == EFCT_HW_IO_TARGET_READ) ? "send" : "recv",
		      xwire_len);

	io->hio_type = type;

	io->scsi_tgt_cb = cb;
	io->scsi_tgt_cb_arg = arg;

	residual = io->exp_xfer_len - io->transferred;
	io->wire_len = (xwire_len < residual) ? xwire_len : residual;
	residual = (xwire_len - io->wire_len);

	memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.fcp_tgt.ox_id = io->init_task_tag;
	io->iparam.fcp_tgt.offset = io->transferred;
	io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
	io->iparam.fcp_tgt.timeout = io->timeout;

	/* if this is the last data phase and there is no residual, enable
	 * auto-good-response
	 */
	if (enable_ar && (flags & EFCT_SCSI_LAST_DATAPHASE) && residual == 0 &&
	    ((io->transferred + io->wire_len) == io->exp_xfer_len) &&
	    (!(flags & EFCT_SCSI_NO_AUTO_RESPONSE))) {
		io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
		io->auto_resp = true;
	} else {
		io->auto_resp = false;
	}

	/* save this transfer length */
	io->xfer_req = io->wire_len;

	/* Adjust the transferred count to account for overrun
	 * when the residual is calculated in efct_scsi_send_resp
	 */
	io->transferred += residual;

	/* Adjust the SGL size if there is overrun */
	if (residual) {
		struct efct_scsi_sgl *sgl_ptr = &io->sgl[sgl_count - 1];

		while (residual) {
			size_t len = sgl_ptr->len;

			if (len > residual) {
				sgl_ptr->len = len - residual;
				residual = 0;
			} else {
				sgl_ptr->len = 0;
				residual -= len;
				io->sgl_count--;
			}
			sgl_ptr--;
		}
	}

	/* Set latency and WQ steering */
	io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
	io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
				EFCT_SCSI_WQ_STEERING_SHIFT;
	io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
				EFCT_SCSI_WQ_CLASS_SHIFT;

	if (efct->xport) {
		struct efct_xport *xport = efct->xport;

		if (type == EFCT_HW_IO_TARGET_READ) {
			xport->fcp_stats.input_requests++;
			xport->fcp_stats.input_bytes += xwire_len;
		} else if (type == EFCT_HW_IO_TARGET_WRITE) {
			xport->fcp_stats.output_requests++;
			xport->fcp_stats.output_bytes += xwire_len;
		}
	}

	return efct_scsi_io_dispatch(io, efct_target_io_cb);
}
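
/*
 * Worked example of the overrun handling above (illustrative numbers):
 * with exp_xfer_len = 4096, transferred = 0 and xwire_len = 8192, the
 * wire length is clamped to 4096 and residual = 4096. The trailing SGL
 * entries are shortened (or dropped) until those 4096 excess bytes are
 * gone, and io->transferred is pre-incremented by the same amount so
 * that efct_scsi_send_resp() later computes a negative residual and
 * reports an FCP overflow of 4096 bytes.
 */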

int
efct_scsi_send_rd_data(struct efct_io *io, u32 flags,
		       struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
		       efct_scsi_io_cb_t cb, void *arg)
{
	return efct_scsi_xfer_data(io, flags, sgl, sgl_count,
				   len, EFCT_HW_IO_TARGET_READ,
				   enable_tsend_auto_resp(io->efct), cb, arg);
}

int
efct_scsi_recv_wr_data(struct efct_io *io, u32 flags,
		       struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
		       efct_scsi_io_cb_t cb, void *arg)
{
	return efct_scsi_xfer_data(io, flags, sgl, sgl_count, len,
				   EFCT_HW_IO_TARGET_WRITE,
				   enable_treceive_auto_resp(io->efct), cb, arg);
}

int
efct_scsi_send_resp(struct efct_io *io, u32 flags,
		    struct efct_scsi_cmd_resp *rsp,
		    efct_scsi_io_cb_t cb, void *arg)
{
	struct efct *efct;
	int residual;
	/* Always try auto resp */
	bool auto_resp = true;
	u8 scsi_status = 0;
	u16 scsi_status_qualifier = 0;
	u8 *sense_data = NULL;
	u32 sense_data_length = 0;

	efct = io->efct;

	if (rsp) {
		scsi_status = rsp->scsi_status;
		scsi_status_qualifier = rsp->scsi_status_qualifier;
		sense_data = rsp->sense_data;
		sense_data_length = rsp->sense_data_length;
		residual = rsp->residual;
	} else {
		residual = io->exp_xfer_len - io->transferred;
	}

	io->wire_len = 0;
	io->hio_type = EFCT_HW_IO_TARGET_RSP;

	io->scsi_tgt_cb = cb;
	io->scsi_tgt_cb_arg = arg;

	memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.fcp_tgt.ox_id = io->init_task_tag;
	io->iparam.fcp_tgt.offset = 0;
	io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
	io->iparam.fcp_tgt.timeout = io->timeout;

	/* Set low latency queueing request */
	io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
	io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
				EFCT_SCSI_WQ_STEERING_SHIFT;
	io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
				EFCT_SCSI_WQ_CLASS_SHIFT;

	if (scsi_status != 0 || residual || sense_data_length) {
		struct fcp_resp_with_ext *fcprsp = io->rspbuf.virt;
		u8 *sns_data;

		if (!fcprsp) {
			efc_log_err(efct, "NULL response buffer\n");
			return -EIO;
		}

		sns_data = (u8 *)io->rspbuf.virt + sizeof(*fcprsp);

		auto_resp = false;

		memset(fcprsp, 0, sizeof(*fcprsp));

		io->wire_len += sizeof(*fcprsp);

		fcprsp->resp.fr_status = scsi_status;
		fcprsp->resp.fr_retry_delay =
			cpu_to_be16(scsi_status_qualifier);

		/* set residual status if necessary */
		if (residual != 0) {
			/* FCP: if data transferred is less than the
			 * amount expected, then this is an underflow.
			 * If data transferred would have been greater
			 * than the amount expected, this is an overflow
			 */
			if (residual > 0) {
				fcprsp->resp.fr_flags |= FCP_RESID_UNDER;
				fcprsp->ext.fr_resid = cpu_to_be32(residual);
			} else {
				fcprsp->resp.fr_flags |= FCP_RESID_OVER;
				fcprsp->ext.fr_resid = cpu_to_be32(-residual);
			}
		}

		if (EFCT_SCSI_SNS_BUF_VALID(sense_data) && sense_data_length) {
			if (sense_data_length > SCSI_SENSE_BUFFERSIZE) {
				efc_log_err(efct, "Sense exceeds max size.\n");
				return -EIO;
			}

			fcprsp->resp.fr_flags |= FCP_SNS_LEN_VAL;
			memcpy(sns_data, sense_data, sense_data_length);
			fcprsp->ext.fr_sns_len = cpu_to_be32(sense_data_length);
			io->wire_len += sense_data_length;
		}

		io->sgl[0].addr = io->rspbuf.phys;
		io->sgl[0].dif_addr = 0;
		io->sgl[0].len = io->wire_len;
		io->sgl_count = 1;
	}

	if (auto_resp)
		io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;

	return efct_scsi_io_dispatch(io, efct_target_io_cb);
}
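
/*
 * Residual encoding example (illustrative numbers): if the initiator
 * expected 8192 bytes and only 4096 were moved, residual is +4096 and
 * the response above carries FCP_RESID_UNDER with fr_resid = 4096; a
 * negative residual (overrun) is reported as FCP_RESID_OVER with the
 * magnitude in fr_resid.
 */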

static int
efct_target_bls_resp_cb(struct efct_hw_io *hio, u32 length, int status,
			u32 ext_status, void *app)
{
	struct efct_io *io = app;
	struct efct *efct;
	enum efct_scsi_io_status bls_status;

	efct = io->efct;

	/* BLS isn't really a "SCSI" concept, but use SCSI status */
	if (status) {
		io_error_log(io, "s=%#x x=%#x\n", status, ext_status);
		bls_status = EFCT_SCSI_STATUS_ERROR;
	} else {
		bls_status = EFCT_SCSI_STATUS_GOOD;
	}

	if (io->bls_cb) {
		efct_scsi_io_cb_t bls_cb = io->bls_cb;
		void *bls_cb_arg = io->bls_cb_arg;

		io->bls_cb = NULL;
		io->bls_cb_arg = NULL;

		/* invoke callback */
		bls_cb(io, bls_status, 0, bls_cb_arg);
	}

	efct_scsi_check_pending(efct);
	return 0;
}

static int
efct_target_send_bls_resp(struct efct_io *io,
			  efct_scsi_io_cb_t cb, void *arg)
{
	struct efct_node *node = io->node;
	struct sli_bls_params *bls = &io->iparam.bls;
	struct efct *efct = node->efct;
	struct fc_ba_acc *acc;
	int rc;

	/* fill out IO structure with everything needed to send BA_ACC */
	memset(&io->iparam, 0, sizeof(io->iparam));
	bls->ox_id = io->init_task_tag;
	bls->rx_id = io->abort_rx_id;
	bls->vpi = io->node->vpi;
	bls->rpi = io->node->rpi;
	bls->s_id = U32_MAX;
	bls->d_id = io->node->node_fc_id;
	bls->rpi_registered = true;

	acc = (void *)bls->payload;
	acc->ba_ox_id = cpu_to_be16(bls->ox_id);
	acc->ba_rx_id = cpu_to_be16(bls->rx_id);
	acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);

	/* generic io fields have already been populated */

	/* set type and BLS-specific fields */
	io->io_type = EFCT_IO_TYPE_BLS_RESP;
	io->display_name = "bls_rsp";
	io->hio_type = EFCT_HW_BLS_ACC;
	io->bls_cb = cb;
	io->bls_cb_arg = arg;

	/* dispatch IO */
	rc = efct_hw_bls_send(efct, FC_RCTL_BA_ACC, bls,
			      efct_target_bls_resp_cb, io);
	return rc;
}
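
/*
 * Note on the BA_ACC built above: it echoes the OX_ID/RX_ID of the
 * exchange being aborted and sets ba_high_seq_cnt to U16_MAX. s_id is
 * left as U32_MAX, which (an assumption about the SLI layer's
 * convention) lets the lower layer substitute the port's own FC ID
 * when building the frame header.
 */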

static int efct_bls_send_rjt_cb(struct efct_hw_io *hio, u32 length, int status,
				u32 ext_status, void *app)
{
	struct efct_io *io = app;

	efct_scsi_io_free(io);
	return 0;
}

struct efct_io *
efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr)
{
	struct efct_node *node = io->node;
	struct sli_bls_params *bls = &io->iparam.bls;
	struct efct *efct = node->efct;
	struct fc_ba_rjt *acc;
	int rc;

	/* fill out BLS Response-specific fields */
	io->io_type = EFCT_IO_TYPE_BLS_RESP;
	io->display_name = "ba_rjt";
	io->hio_type = EFCT_HW_BLS_RJT;
	io->init_task_tag = be16_to_cpu(hdr->fh_ox_id);

	/* fill out iparam fields */
	memset(&io->iparam, 0, sizeof(io->iparam));
	bls->ox_id = be16_to_cpu(hdr->fh_ox_id);
	bls->rx_id = be16_to_cpu(hdr->fh_rx_id);
	bls->vpi = io->node->vpi;
	bls->rpi = io->node->rpi;
	bls->s_id = U32_MAX;
	bls->d_id = io->node->node_fc_id;
	bls->rpi_registered = true;

	acc = (void *)bls->payload;
	acc->br_reason = ELS_RJT_UNAB;
	acc->br_explan = ELS_EXPL_NONE;

	rc = efct_hw_bls_send(efct, FC_RCTL_BA_RJT, bls, efct_bls_send_rjt_cb,
			      io);
	if (rc) {
		efc_log_err(efct, "efct_hw_bls_send() failed: %d\n", rc);
		efct_scsi_io_free(io);
		io = NULL;
	}
	return io;
}

int
efct_scsi_send_tmf_resp(struct efct_io *io,
			enum efct_scsi_tmf_resp rspcode,
			u8 addl_rsp_info[3],
			efct_scsi_io_cb_t cb, void *arg)
{
	int rc;
	struct {
		struct fcp_resp_with_ext rsp_ext;
		struct fcp_resp_rsp_info info;
	} *fcprsp;
	u8 fcp_rspcode;

	io->wire_len = 0;

	switch (rspcode) {
	case EFCT_SCSI_TMF_FUNCTION_COMPLETE:
		fcp_rspcode = FCP_TMF_CMPL;
		break;
	case EFCT_SCSI_TMF_FUNCTION_SUCCEEDED:
	case EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND:
		fcp_rspcode = FCP_TMF_CMPL;
		break;
	case EFCT_SCSI_TMF_FUNCTION_REJECTED:
		fcp_rspcode = FCP_TMF_REJECTED;
		break;
	case EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER:
		fcp_rspcode = FCP_TMF_INVALID_LUN;
		break;
	case EFCT_SCSI_TMF_SERVICE_DELIVERY:
		fcp_rspcode = FCP_TMF_FAILED;
		break;
	default:
		fcp_rspcode = FCP_TMF_REJECTED;
		break;
	}

	io->hio_type = EFCT_HW_IO_TARGET_RSP;

	io->scsi_tgt_cb = cb;
	io->scsi_tgt_cb_arg = arg;

	if (io->tmf_cmd == EFCT_SCSI_TMF_ABORT_TASK) {
		rc = efct_target_send_bls_resp(io, cb, arg);
		return rc;
	}

	/* populate the FCP TMF response */
	fcprsp = io->rspbuf.virt;
	memset(fcprsp, 0, sizeof(*fcprsp));

	/* the response carries FCP_RSP_INFO, so flag its length as valid */
	fcprsp->rsp_ext.resp.fr_flags |= FCP_RSP_LEN_VAL;

	if (addl_rsp_info) {
		memcpy(fcprsp->info._fr_resvd, addl_rsp_info,
		       sizeof(fcprsp->info._fr_resvd));
	}
	fcprsp->info.rsp_code = fcp_rspcode;

	io->wire_len = sizeof(*fcprsp);

	fcprsp->rsp_ext.ext.fr_rsp_len =
		cpu_to_be32(sizeof(struct fcp_resp_rsp_info));

	io->sgl[0].addr = io->rspbuf.phys;
	io->sgl[0].dif_addr = 0;
	io->sgl[0].len = io->wire_len;
	io->sgl_count = 1;

	memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.fcp_tgt.ox_id = io->init_task_tag;
	io->iparam.fcp_tgt.offset = 0;
	io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
	io->iparam.fcp_tgt.timeout = io->timeout;

	rc = efct_scsi_io_dispatch(io, efct_target_io_cb);

	return rc;
}

static int
efct_target_abort_cb(struct efct_hw_io *hio, u32 length, int status,
		     u32 ext_status, void *app)
{
	struct efct_io *io = app;
	struct efct *efct;
	enum efct_scsi_io_status scsi_status;
	efct_scsi_io_cb_t abort_cb;
	void *abort_cb_arg;

	efct = io->efct;

	if (!io->abort_cb)
		goto done;

	abort_cb = io->abort_cb;
	abort_cb_arg = io->abort_cb_arg;

	io->abort_cb = NULL;
	io->abort_cb_arg = NULL;

	switch (status) {
	case SLI4_FC_WCQE_STATUS_SUCCESS:
		scsi_status = EFCT_SCSI_STATUS_GOOD;
		break;
	case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
		switch (ext_status) {
		case SLI4_FC_LOCAL_REJECT_NO_XRI:
			scsi_status = EFCT_SCSI_STATUS_NO_IO;
			break;
		case SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS:
			scsi_status = EFCT_SCSI_STATUS_ABORT_IN_PROGRESS;
			break;
		default:
			/* we have seen 0x15 (abort in progress) */
			scsi_status = EFCT_SCSI_STATUS_ERROR;
			break;
		}
		break;
	case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
		scsi_status = EFCT_SCSI_STATUS_CHECK_RESPONSE;
		break;
	default:
		scsi_status = EFCT_SCSI_STATUS_ERROR;
		break;
	}

	/* invoke callback */
	abort_cb(io->io_to_abort, scsi_status, 0, abort_cb_arg);

done:
	/* done with IO to abort; efct_ref_get(): efct_scsi_tgt_abort_io() */
	kref_put(&io->io_to_abort->ref, io->io_to_abort->release);
	efct_io_pool_io_free(efct->xport->io_pool, io);

	efct_scsi_check_pending(efct);
	return 0;
}

int
efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg)
{
	struct efct *efct;
	struct efct_xport *xport;
	int rc;
	struct efct_io *abort_io = NULL;

	efct = io->efct;
	xport = efct->xport;

	/* take a reference on IO being aborted */
	if (kref_get_unless_zero(&io->ref) == 0) {
		/* command no longer active */
		scsi_io_printf(io, "command no longer active\n");
		return -EIO;
	}

	/*
	 * allocate a new IO to send the abort request. Use efct_io_alloc()
	 * directly, as we need an IO object that will not fail allocation
	 * due to allocations being disabled (in efct_scsi_io_alloc())
	 */
	abort_io = efct_io_pool_io_alloc(efct->xport->io_pool);
	if (!abort_io) {
		atomic_add_return(1, &xport->io_alloc_failed_count);
		kref_put(&io->ref, io->release);
		return -EIO;
	}

	/* Save the target server callback and argument */
	/* set generic fields */
	abort_io->cmd_tgt = true;
	abort_io->node = io->node;

	/* set type and abort-specific fields */
	abort_io->io_type = EFCT_IO_TYPE_ABORT;
	abort_io->display_name = "tgt_abort";
	abort_io->io_to_abort = io;
	abort_io->send_abts = false;
	abort_io->abort_cb = cb;
	abort_io->abort_cb_arg = arg;

	/* now dispatch IO */
	rc = efct_scsi_io_dispatch_abort(abort_io, efct_target_abort_cb);
	if (rc)
		kref_put(&io->ref, io->release);
	return rc;
}
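
/*
 * Abort bookkeeping sketch: the reference taken on the victim IO in
 * efct_scsi_tgt_abort_io() is dropped either in efct_target_abort_cb()
 * (at the "done" label) once the abort completes, or immediately above
 * on a dispatch failure, so the victim IO cannot be freed while an
 * abort is in flight against it.
 */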

void
efct_scsi_io_complete(struct efct_io *io)
{
	if (io->io_free) {
		efc_log_debug(io->efct, "completion for non-busy io tag 0x%x\n",
			      io->tag);
		return;
	}

	scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
	kref_put(&io->ref, io->release);
}