qedf_els.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 */
#include "qedf.h"
/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
        void *data, uint32_t data_len,
        void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
        struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
        struct qedf_ctx *qedf;
        struct fc_lport *lport;
        struct qedf_ioreq *els_req;
        struct qedf_mp_req *mp_req;
        struct fc_frame_header *fc_hdr;
        struct fcoe_task_context *task;
        int rc = 0;
        uint32_t did, sid;
        uint16_t xid;
        struct fcoe_wqe *sqe;
        unsigned long flags;
        u16 sqe_idx;

        if (!fcport) {
                QEDF_ERR(NULL, "fcport is NULL");
                rc = -EINVAL;
                goto els_err;
        }

        qedf = fcport->qedf;
        lport = qedf->lport;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

        rc = fc_remote_port_chkready(fcport->rport);
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
                rc = -EAGAIN;
                goto els_err;
        }
        if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
                QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
                          op);
                rc = -EAGAIN;
                goto els_err;
        }
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
                rc = -EINVAL;
                goto els_err;
        }

        els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
        if (!els_req) {
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
                          "Failed to alloc ELS request 0x%x\n", op);
                rc = -ENOMEM;
                goto els_err;
        }

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
                   "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
                   els_req->xid);
        els_req->sc_cmd = NULL;
        els_req->cmd_type = QEDF_ELS;
        els_req->fcport = fcport;
        els_req->cb_func = cb_func;
        cb_arg->io_req = els_req;
        cb_arg->op = op;
        els_req->cb_arg = cb_arg;
        els_req->data_xfer_len = data_len;

        /* Record which cpu this request is associated with */
        els_req->cpu = smp_processor_id();

        mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
        rc = qedf_init_mp_req(els_req);
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
                kref_put(&els_req->refcount, qedf_release_cmd);
                goto els_err;
        } else {
                rc = 0;
        }

        /* Fill ELS Payload */
        if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
                memcpy(mp_req->req_buf, data, data_len);
        } else {
                QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
                els_req->cb_func = NULL;
                els_req->cb_arg = NULL;
                kref_put(&els_req->refcount, qedf_release_cmd);
                rc = -EINVAL;
        }

        if (rc)
                goto els_err;

        /* Fill FC header */
        fc_hdr = &(mp_req->req_fc_hdr);

        did = fcport->rdata->ids.port_id;
        sid = fcport->sid;

        __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
                          FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
                          FC_FC_SEQ_INIT, 0);

        /* Obtain exchange id */
        xid = els_req->xid;

        spin_lock_irqsave(&fcport->rport_lock, flags);

        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
        memset(sqe, 0, sizeof(struct fcoe_wqe));

        /* Initialize task context for this IO request */
        task = qedf_get_task_mem(&qedf->tasks, xid);
        qedf_init_mp_task(els_req, task, sqe);

        /* Put timer on els request */
        if (timer_msec)
                qedf_cmd_timer_set(qedf, els_req, timer_msec);

        /* Ring doorbell */
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
                   "req\n");
        qedf_ring_doorbell(fcport);
        set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

        spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
        return rc;
}
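
/*
 * Handle the firmware CQE for a completed ELS request: drop completions that
 * race with a flush or cleanup event, cancel the ELS timer, record the
 * response length from the CQE and invoke the registered callback.
 */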
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *els_req)
{
        struct fcoe_cqe_midpath_info *mp_info;
        struct qedf_rport *fcport;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
                   " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

        if ((els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
            || (els_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS)
            || (els_req->event == QEDF_IOREQ_EV_CLEANUP_FAILED)) {
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
                          "ELS completion xid=0x%x after flush event=0x%x",
                          els_req->xid, els_req->event);
                return;
        }

        fcport = els_req->fcport;

        /* When flush is active,
         * let the cmds be completed from the cleanup context
         */
        if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
            test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
                          "Dropping ELS completion xid=0x%x as fcport is flushing",
                          els_req->xid);
                return;
        }

        clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

        /* Kill the ELS timer */
        cancel_delayed_work(&els_req->timeout_work);

        /* Get ELS response length from CQE */
        mp_info = &cqe->cqe_info.midpath_info;
        els_req->mp_req.resp_len = mp_info->data_placement_size;

        /* Parse ELS response */
        if ((els_req->cb_func) && (els_req->cb_arg)) {
                els_req->cb_func(els_req->cb_arg);
                els_req->cb_arg = NULL;
        }

        kref_put(&els_req->refcount, qedf_release_cmd);
}
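
/*
 * RRQ completion handler. Releases the reference held on the original
 * (aborted) I/O request and frees the callback argument.
 */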
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *orig_io_req;
        struct qedf_ioreq *rrq_req;
        struct qedf_ctx *qedf;
        int refcount;

        rrq_req = cb_arg->io_req;
        qedf = rrq_req->fcport->qedf;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

        orig_io_req = cb_arg->aborted_io_req;

        if (!orig_io_req) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "Original io_req is NULL, rrq_req = %p.\n", rrq_req);
                goto out_free;
        }

        refcount = kref_read(&orig_io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
                   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
                   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

        /*
         * This should return the aborted io_req to the command pool. Note that
         * we need to check the refcount in case the original request was
         * flushed but we get a completion on this xid.
         */
        if (orig_io_req && refcount > 0)
                kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
        /*
         * Release a reference to the rrq request if we timed out as the
         * rrq completion handler is called directly from the timeout handler
         * and not from els_compl where the reference would have normally been
         * released.
         */
        if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
                kref_put(&rrq_req->refcount, qedf_release_cmd);
        kfree(cb_arg);
}
/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
        struct fc_els_rrq rrq;
        struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        struct qedf_ctx *qedf;
        uint32_t sid;
        uint32_t r_a_tov;
        int rc;
        int refcount;

        if (!aborted_io_req) {
                QEDF_ERR(NULL, "abort_io_req is NULL.\n");
                return -EINVAL;
        }

        fcport = aborted_io_req->fcport;

        if (!fcport) {
                refcount = kref_read(&aborted_io_req->refcount);
                QEDF_ERR(NULL,
                         "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
                         aborted_io_req->xid, refcount);
                kref_put(&aborted_io_req->refcount, qedf_release_cmd);
                return -EINVAL;
        }

        /* Check that fcport is still offloaded */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
                return -EINVAL;
        }

        if (!fcport->qedf) {
                QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
                return -EINVAL;
        }

        qedf = fcport->qedf;

        /*
         * Sanity check that we can send a RRQ to make sure that refcount isn't
         * 0
         */
        refcount = kref_read(&aborted_io_req->refcount);
        if (refcount != 1) {
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
                          "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
                          aborted_io_req->xid, aborted_io_req, refcount);
                return -EINVAL;
        }

        lport = qedf->lport;
        sid = fcport->sid;
        r_a_tov = lport->r_a_tov;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
                   "io = %p, orig_xid = 0x%x\n", aborted_io_req,
                   aborted_io_req->xid);
        memset(&rrq, 0, sizeof(rrq));

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                          "RRQ\n");
                rc = -ENOMEM;
                goto rrq_err;
        }

        cb_arg->aborted_io_req = aborted_io_req;

        rrq.rrq_cmd = ELS_RRQ;
        hton24(rrq.rrq_s_id, sid);
        rrq.rrq_ox_id = htons(aborted_io_req->xid);
        rrq.rrq_rx_id =
            htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

        rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
            qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
                          "req 0x%x\n", aborted_io_req->xid);
                kfree(cb_arg);
                kref_put(&aborted_io_req->refcount, qedf_release_cmd);
        }
        return rc;
}
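
/*
 * Rebuild an ELS response received over the offloaded path into a libfc
 * frame (restoring the OX_ID that libfc assigned) and hand it back to the
 * exchange layer via fc_exch_recv().
 */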
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
                                        struct fc_frame *fp,
                                        u16 l2_oxid)
{
        struct fc_lport *lport = fcport->qedf->lport;
        struct fc_frame_header *fh;
        u32 crc;

        fh = (struct fc_frame_header *)fc_frame_header_get(fp);

        /* Set the OXID we return to what libfc used */
        if (l2_oxid != FC_XID_UNKNOWN)
                fh->fh_ox_id = htons(l2_oxid);

        /* Setup header fields */
        fh->fh_r_ctl = FC_RCTL_ELS_REP;
        fh->fh_type = FC_TYPE_ELS;

        /* Last sequence, end sequence */
        fh->fh_f_ctl[0] = 0x98;
        hton24(fh->fh_d_id, lport->port_id);
        hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
        fh->fh_rx_id = 0xffff;

        /* Set frame attributes */
        crc = fcoe_fc_crc(fp);
        fc_frame_init(fp);
        fr_dev(fp) = lport;
        fr_sof(fp) = FC_SOF_I3;
        fr_eof(fp) = FC_EOF_T;
        fr_crc(fp) = cpu_to_le32(~crc);

        /* Send completed request to libfc */
        fc_exch_recv(lport, fp);
}
/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
        struct fc_lport *lport;
        struct fc_rport_priv *rdata;
        u32 port_id;
        unsigned long flags;

        if (!fcport) {
                QEDF_ERR(NULL, "fcport is NULL.\n");
                return;
        }

        spin_lock_irqsave(&fcport->rport_lock, flags);
        if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
            !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
            test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
                          fcport);
                spin_unlock_irqrestore(&fcport->rport_lock, flags);
                return;
        }

        /* Set that we are now in reset */
        set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
        spin_unlock_irqrestore(&fcport->rport_lock, flags);

        rdata = fcport->rdata;
        if (rdata && !kref_get_unless_zero(&rdata->kref)) {
                fcport->rdata = NULL;
                rdata = NULL;
        }

        if (rdata && rdata->rp_state == RPORT_ST_READY) {
                lport = fcport->qedf->lport;
                port_id = rdata->ids.port_id;
                QEDF_ERR(&(fcport->qedf->dbg_ctx),
                          "LOGO port_id=%x.\n", port_id);
                fc_rport_logoff(rdata);
                kref_put(&rdata->kref, fc_rport_destroy);
                mutex_lock(&lport->disc.disc_mutex);
                /* Recreate the rport and log back in */
                rdata = fc_rport_create(lport, port_id);
                mutex_unlock(&lport->disc.disc_mutex);
                if (rdata)
                        fc_rport_login(rdata);
                fcport->rdata = rdata;
        }

        clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
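
/*
 * Completion handler for ELS commands issued on behalf of libfc (e.g. ADISC).
 * Copies the firmware response into a new fc_frame and returns it to libfc
 * through qedf_process_l2_frame_compl().
 */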
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *els_req;
        struct qedf_rport *fcport;
        struct qedf_mp_req *mp_req;
        struct fc_frame *fp;
        struct fc_frame_header *fh, *mp_fc_hdr;
        void *resp_buf, *fc_payload;
        u32 resp_len;
        u16 l2_oxid;

        l2_oxid = cb_arg->l2_oxid;
        els_req = cb_arg->io_req;

        if (!els_req) {
                QEDF_ERR(NULL, "els_req is NULL.\n");
                goto free_arg;
        }

        /*
         * If we are flushing the command just free the cb_arg as none of the
         * response data will be valid.
         */
        if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) {
                QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n",
                          els_req->xid);
                goto free_arg;
        }

        fcport = els_req->fcport;
        mp_req = &(els_req->mp_req);
        mp_fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        resp_buf = mp_req->resp_buf;

        /*
         * If a middle path ELS command times out, don't try to return
         * the command but rather do any internal cleanup and then libfc
         * timeout the command and clean up its internal resources.
         */
        if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
                /*
                 * If ADISC times out, libfc will timeout the exchange and then
                 * try to send a PLOGI which will timeout since the session is
                 * still offloaded. Force libfc to logout the session which
                 * will offload the connection and allow the PLOGI response to
                 * flow over the LL2 path.
                 */
                if (cb_arg->op == ELS_ADISC)
                        qedf_restart_rport(fcport);
                return;
        }

        if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
                          "beyond page size.\n");
                goto free_arg;
        }

        fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
        if (!fp) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx),
                          "fc_frame_alloc failure.\n");
                return;
        }

        /* Copy frame header from firmware into fp */
        fh = (struct fc_frame_header *)fc_frame_header_get(fp);
        memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

        /* Copy payload from firmware into fp */
        fc_payload = fc_frame_payload_get(fp, resp_len);
        memcpy(fc_payload, resp_buf, resp_len);

        QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
                   "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
        qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
        kfree(cb_arg);
}
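
/* Send an ADISC generated by libfc over the offloaded connection. */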
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
        struct fc_els_adisc *adisc;
        struct fc_frame_header *fh;
        struct fc_lport *lport = fcport->qedf->lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        struct qedf_ctx *qedf;
        uint32_t r_a_tov = lport->r_a_tov;
        int rc;

        qedf = fcport->qedf;
        fh = fc_frame_header_get(fp);

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                          "ADISC\n");
                rc = -ENOMEM;
                goto adisc_err;
        }
        cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                   "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

        adisc = fc_frame_payload_get(fp, sizeof(*adisc));

        rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
            qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
                kfree(cb_arg);
        }
        return rc;
}
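
/*
 * SRR completion handler. On LS_ACC the retransmission request was accepted;
 * on LS_RJT the original I/O is aborted via ABTS.
 */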
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *orig_io_req;
        struct qedf_ioreq *srr_req;
        struct qedf_mp_req *mp_req;
        struct fc_frame_header *mp_fc_hdr, *fh;
        struct fc_frame *fp;
        void *resp_buf, *fc_payload;
        u32 resp_len;
        struct fc_lport *lport;
        struct qedf_ctx *qedf;
        int refcount;
        u8 opcode;

        srr_req = cb_arg->io_req;
        qedf = srr_req->fcport->qedf;
        lport = qedf->lport;

        orig_io_req = cb_arg->aborted_io_req;

        if (!orig_io_req) {
                QEDF_ERR(NULL, "orig_io_req is NULL.\n");
                goto out_free;
        }

        clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

        if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
            srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
                cancel_delayed_work_sync(&orig_io_req->timeout_work);

        refcount = kref_read(&orig_io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
                   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
                   orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

        /* If a SRR times out, simply free resources */
        if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "ELS timeout rec_xid=0x%x.\n", srr_req->xid);
                goto out_put;
        }

        /* Normalize response data into struct fc_frame */
        mp_req = &(srr_req->mp_req);
        mp_fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        resp_buf = mp_req->resp_buf;

        fp = fc_frame_alloc(lport, resp_len);
        if (!fp) {
                QEDF_ERR(&(qedf->dbg_ctx),
                          "fc_frame_alloc failure.\n");
                goto out_put;
        }

        /* Copy frame header from firmware into fp */
        fh = (struct fc_frame_header *)fc_frame_header_get(fp);
        memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

        /* Copy payload from firmware into fp */
        fc_payload = fc_frame_payload_get(fp, resp_len);
        memcpy(fc_payload, resp_buf, resp_len);

        opcode = fc_frame_payload_op(fp);
        switch (opcode) {
        case ELS_LS_ACC:
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                          "SRR success.\n");
                break;
        case ELS_LS_RJT:
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
                          "SRR rejected.\n");
                qedf_initiate_abts(orig_io_req, true);
                break;
        }

        fc_frame_free(fp);
out_put:
        /* Put reference for original command since SRR completed */
        kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
        kfree(cb_arg);
}
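
/*
 * Send an FCP SRR (Sequence Retransmission Request) for the original I/O.
 * Holds a reference on orig_io_req until the SRR completes.
 */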
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
        struct fcp_srr srr;
        struct qedf_ctx *qedf;
        struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        u32 r_a_tov;
        int rc;

        if (!orig_io_req) {
                QEDF_ERR(NULL, "orig_io_req is NULL.\n");
                return -EINVAL;
        }

        fcport = orig_io_req->fcport;

        /* Check that fcport is still offloaded */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
                return -EINVAL;
        }

        if (!fcport->qedf) {
                QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
                return -EINVAL;
        }

        /* Take reference until SRR command completion */
        kref_get(&orig_io_req->refcount);

        qedf = fcport->qedf;
        lport = qedf->lport;
        r_a_tov = lport->r_a_tov;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
                   "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
        memset(&srr, 0, sizeof(srr));

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                          "SRR\n");
                rc = -ENOMEM;
                goto srr_err;
        }

        cb_arg->aborted_io_req = orig_io_req;

        srr.srr_op = ELS_SRR;
        srr.srr_ox_id = htons(orig_io_req->xid);
        srr.srr_rx_id = htons(orig_io_req->rx_id);
        srr.srr_rel_off = htonl(offset);
        srr.srr_r_ctl = r_ctl;

        rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
            qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
                          "=0x%x\n", orig_io_req->xid);
                kfree(cb_arg);
                /* If we fail to queue SRR, send ABTS to orig_io */
                qedf_initiate_abts(orig_io_req, true);
                kref_put(&orig_io_req->refcount, qedf_release_cmd);
        } else
                /* Tell other threads that SRR is in progress */
                set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

        return rc;
}
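
/*
 * Post a firmware sequence cleanup (recovery) task for orig_io_req at the
 * given offset. The SRR itself is sent from the cleanup completion handler.
 */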
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
        u32 offset, u8 r_ctl)
{
        struct qedf_rport *fcport;
        unsigned long flags;
        struct qedf_els_cb_arg *cb_arg;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;

        fcport = orig_io_req->fcport;

        QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
                   "Doing sequence cleanup for xid=0x%x offset=%u.\n",
                   orig_io_req->xid, offset);

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
                          "for sequence cleanup\n");
                return;
        }

        /* Get reference for cleanup request */
        kref_get(&orig_io_req->refcount);

        orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
        cb_arg->offset = offset;
        cb_arg->r_ctl = r_ctl;
        orig_io_req->cb_arg = cb_arg;

        qedf_cmd_timer_set(fcport->qedf, orig_io_req,
            QEDF_CLEANUP_TIMEOUT * HZ);

        spin_lock_irqsave(&fcport->rport_lock, flags);

        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
        memset(sqe, 0, sizeof(struct fcoe_wqe));
        orig_io_req->task_params->sqe = sqe;

        init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
                                                   offset);
        qedf_ring_doorbell(fcport);

        spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
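
/*
 * Completion handler for the sequence cleanup task: on success send the SRR
 * recorded in cb_arg, otherwise just release resources.
 */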
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
        struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
        int rc;
        struct qedf_els_cb_arg *cb_arg;

        cb_arg = io_req->cb_arg;

        /* If we timed out just free resources */
        if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "cqe is NULL or timeout event (0x%x)", io_req->event);
                goto free;
        }

        /* Kill the timer we put on the request */
        cancel_delayed_work_sync(&io_req->timeout_work);

        rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
        if (rc)
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
                          "abort, xid=0x%x.\n", io_req->xid);
free:
        kfree(cb_arg);
        kref_put(&io_req->refcount, qedf_release_cmd);
}
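
/*
 * Reissue a SCSI command on a new exchange when a REC indicates the command
 * was lost. The original request is aborted without being returned to the
 * SCSI layer since it has been reissued on another OX_ID.
 */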
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
        struct qedf_rport *fcport;
        struct qedf_ioreq *new_io_req;
        unsigned long flags;
        bool rc = false;

        fcport = orig_io_req->fcport;
        if (!fcport) {
                QEDF_ERR(NULL, "fcport is NULL.\n");
                goto out;
        }

        if (!orig_io_req->sc_cmd) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
                          "xid=0x%x.\n", orig_io_req->xid);
                goto out;
        }

        new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
        if (!new_io_req) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
                          "io_req.\n");
                goto out;
        }

        new_io_req->sc_cmd = orig_io_req->sc_cmd;

        /*
         * This keeps the sc_cmd struct from being returned to the tape
         * driver and being requeued twice. We do need to put a reference
         * for the original I/O request since we will not do a SCSI completion
         * for it.
         */
        orig_io_req->sc_cmd = NULL;
        kref_put(&orig_io_req->refcount, qedf_release_cmd);

        spin_lock_irqsave(&fcport->rport_lock, flags);

        /* kref for new command released in qedf_post_io_req on error */
        if (qedf_post_io_req(fcport, new_io_req)) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
                /* Return SQE to pool */
                atomic_inc(&fcport->free_sqes);
        } else {
                QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
                          "Reissued SCSI command from orig_xid=0x%x on "
                          "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
                /*
                 * Abort the original I/O but do not return SCSI command as
                 * it has been reissued on another OX_ID.
                 */
                spin_unlock_irqrestore(&fcport->rport_lock, flags);
                qedf_initiate_abts(orig_io_req, false);
                goto out;
        }

        spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
        return rc;
}
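
/*
 * REC completion handler. Based on the LS_ACC exchange status, decide whether
 * to send an SRR or a sequence cleanup, or requeue the command if the target
 * rejected the REC because it has no record of the exchange.
 */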
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *orig_io_req;
        struct qedf_ioreq *rec_req;
        struct qedf_mp_req *mp_req;
        struct fc_frame_header *mp_fc_hdr, *fh;
        struct fc_frame *fp;
        void *resp_buf, *fc_payload;
        u32 resp_len;
        struct fc_lport *lport;
        struct qedf_ctx *qedf;
        int refcount;
        enum fc_rctl r_ctl;
        struct fc_els_ls_rjt *rjt;
        struct fc_els_rec_acc *acc;
        u8 opcode;
        u32 offset, e_stat;
        struct scsi_cmnd *sc_cmd;
        bool srr_needed = false;

        rec_req = cb_arg->io_req;
        qedf = rec_req->fcport->qedf;
        lport = qedf->lport;

        orig_io_req = cb_arg->aborted_io_req;

        if (!orig_io_req) {
                QEDF_ERR(NULL, "orig_io_req is NULL.\n");
                goto out_free;
        }

        if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
            rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
                cancel_delayed_work_sync(&orig_io_req->timeout_work);

        refcount = kref_read(&orig_io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
                   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
                   orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

        /* If a REC times out, free resources */
        if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n",
                         orig_io_req, orig_io_req->xid);
                goto out_put;
        }

        /* Normalize response data into struct fc_frame */
        mp_req = &(rec_req->mp_req);
        mp_fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        acc = resp_buf = mp_req->resp_buf;

        fp = fc_frame_alloc(lport, resp_len);
        if (!fp) {
                QEDF_ERR(&(qedf->dbg_ctx),
                          "fc_frame_alloc failure.\n");
                goto out_put;
        }

        /* Copy frame header from firmware into fp */
        fh = (struct fc_frame_header *)fc_frame_header_get(fp);
        memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

        /* Copy payload from firmware into fp */
        fc_payload = fc_frame_payload_get(fp, resp_len);
        memcpy(fc_payload, resp_buf, resp_len);

        opcode = fc_frame_payload_op(fp);
        if (opcode == ELS_LS_RJT) {
                rjt = fc_frame_payload_get(fp, sizeof(*rjt));
                if (!rjt) {
                        QEDF_ERR(&qedf->dbg_ctx, "payload get failed");
                        goto out_free_frame;
                }

                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                          "Received LS_RJT for REC: er_reason=0x%x, "
                          "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
                /*
                 * The following response(s) mean that we need to reissue the
                 * request on another exchange. We need to do this without
                 * informing the upper layers lest it cause an application
                 * error.
                 */
                if ((rjt->er_reason == ELS_RJT_LOGIC ||
                    rjt->er_reason == ELS_RJT_UNAB) &&
                    rjt->er_explan == ELS_EXPL_OXID_RXID) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                  "Handle CMD LOST case.\n");
                        qedf_requeue_io_req(orig_io_req);
                }
        } else if (opcode == ELS_LS_ACC) {
                offset = ntohl(acc->reca_fc4value);
                e_stat = ntohl(acc->reca_e_stat);
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                          "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
                          offset, e_stat);
                if (e_stat & ESB_ST_SEQ_INIT) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                  "Target has the seq init\n");
                        goto out_free_frame;
                }
                sc_cmd = orig_io_req->sc_cmd;
                if (!sc_cmd) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                  "sc_cmd is NULL for xid=0x%x.\n",
                                  orig_io_req->xid);
                        goto out_free_frame;
                }
                /* SCSI write case */
                if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                        if (offset == orig_io_req->data_xfer_len) {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                          "WRITE - response lost.\n");
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                                srr_needed = true;
                                offset = 0;
                        } else {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                          "WRITE - XFER_RDY/DATA lost.\n");
                                r_ctl = FC_RCTL_DD_DATA_DESC;
                                /* Use data from warning CQE instead of REC */
                                offset = orig_io_req->tx_buf_off;
                        }
                /* SCSI read case */
                } else {
                        if (orig_io_req->rx_buf_off ==
                            orig_io_req->data_xfer_len) {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                          "READ - response lost.\n");
                                srr_needed = true;
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                                offset = 0;
                        } else {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                          "READ - DATA lost.\n");
                                /*
                                 * For read case we always set the offset to 0
                                 * for sequence recovery task.
                                 */
                                offset = 0;
                                r_ctl = FC_RCTL_DD_SOL_DATA;
                        }
                }

                if (srr_needed)
                        qedf_send_srr(orig_io_req, offset, r_ctl);
                else
                        qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
        }

out_free_frame:
        fc_frame_free(fp);
out_put:
        /* Put reference for original command since REC completed */
        kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
        kfree(cb_arg);
}
/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
        struct fc_els_rec rec;
        struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        struct qedf_ctx *qedf;
        uint32_t sid;
        uint32_t r_a_tov;
        int rc;

        if (!orig_io_req) {
                QEDF_ERR(NULL, "orig_io_req is NULL.\n");
                return -EINVAL;
        }

        fcport = orig_io_req->fcport;

        /* Check that fcport is still offloaded */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
                return -EINVAL;
        }

        if (!fcport->qedf) {
                QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
                return -EINVAL;
        }

        /* Take reference until REC command completion */
        kref_get(&orig_io_req->refcount);

        qedf = fcport->qedf;
        lport = qedf->lport;
        sid = fcport->sid;
        r_a_tov = lport->r_a_tov;

        memset(&rec, 0, sizeof(rec));

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                          "REC\n");
                rc = -ENOMEM;
                goto rec_err;
        }

        cb_arg->aborted_io_req = orig_io_req;

        rec.rec_cmd = ELS_REC;
        hton24(rec.rec_s_id, sid);
        rec.rec_ox_id = htons(orig_io_req->xid);
        rec.rec_rx_id =
            htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
                   "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
                   orig_io_req->xid, rec.rec_rx_id);

        rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
            qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
                          "=0x%x\n", orig_io_req->xid);
                kfree(cb_arg);
                kref_put(&orig_io_req->refcount, qedf_release_cmd);
        }
        return rc;
}