qedi_fw.c 65 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QLogic iSCSI Offload Driver
  4. * Copyright (c) 2016 Cavium Inc.
  5. */
  6. #include <linux/blkdev.h>
  7. #include <scsi/scsi_tcq.h>
  8. #include <linux/delay.h>
  9. #include "qedi.h"
  10. #include "qedi_iscsi.h"
  11. #include "qedi_gbl.h"
  12. #include "qedi_fw_iscsi.h"
  13. #include "qedi_fw_scsi.h"
/* Forward declaration — defined later in this file.  Issues an iSCSI
 * task-management-function request for @ctask on @qedi_conn, using
 * management task @mtask.
 */
static int send_iscsi_tmf(struct qedi_conn *qedi_conn,
			  struct iscsi_task *mtask, struct iscsi_task *ctask);
  16. void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
  17. {
  18. struct scsi_cmnd *sc = cmd->scsi_cmd;
  19. if (cmd->io_tbl.sge_valid && sc) {
  20. cmd->io_tbl.sge_valid = 0;
  21. scsi_dma_unmap(sc);
  22. }
  23. }
  24. static void qedi_process_logout_resp(struct qedi_ctx *qedi,
  25. union iscsi_cqe *cqe,
  26. struct iscsi_task *task,
  27. struct qedi_conn *qedi_conn)
  28. {
  29. struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
  30. struct iscsi_logout_rsp *resp_hdr;
  31. struct iscsi_session *session = conn->session;
  32. struct iscsi_logout_response_hdr *cqe_logout_response;
  33. struct qedi_cmd *cmd;
  34. cmd = (struct qedi_cmd *)task->dd_data;
  35. cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;
  36. spin_lock(&session->back_lock);
  37. resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr;
  38. memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
  39. resp_hdr->opcode = cqe_logout_response->opcode;
  40. resp_hdr->flags = cqe_logout_response->flags;
  41. resp_hdr->hlength = 0;
  42. resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
  43. resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn);
  44. resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
  45. resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
  46. resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait);
  47. resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain);
  48. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
  49. "Freeing tid=0x%x for cid=0x%x\n",
  50. cmd->task_id, qedi_conn->iscsi_conn_id);
  51. spin_lock(&qedi_conn->list_lock);
  52. if (likely(cmd->io_cmd_in_list)) {
  53. cmd->io_cmd_in_list = false;
  54. list_del_init(&cmd->io_cmd);
  55. qedi_conn->active_cmd_count--;
  56. } else {
  57. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
  58. "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
  59. cmd->task_id, qedi_conn->iscsi_conn_id,
  60. &cmd->io_cmd);
  61. }
  62. spin_unlock(&qedi_conn->list_lock);
  63. cmd->state = RESPONSE_RECEIVED;
  64. __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
  65. spin_unlock(&session->back_lock);
  66. }
  67. static void qedi_process_text_resp(struct qedi_ctx *qedi,
  68. union iscsi_cqe *cqe,
  69. struct iscsi_task *task,
  70. struct qedi_conn *qedi_conn)
  71. {
  72. struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
  73. struct iscsi_session *session = conn->session;
  74. struct iscsi_task_context *task_ctx;
  75. struct iscsi_text_rsp *resp_hdr_ptr;
  76. struct iscsi_text_response_hdr *cqe_text_response;
  77. struct qedi_cmd *cmd;
  78. int pld_len;
  79. cmd = (struct qedi_cmd *)task->dd_data;
  80. task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
  81. cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;
  82. spin_lock(&session->back_lock);
  83. resp_hdr_ptr = (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr;
  84. memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr));
  85. resp_hdr_ptr->opcode = cqe_text_response->opcode;
  86. resp_hdr_ptr->flags = cqe_text_response->flags;
  87. resp_hdr_ptr->hlength = 0;
  88. hton24(resp_hdr_ptr->dlength,
  89. (cqe_text_response->hdr_second_dword &
  90. ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
  91. resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
  92. conn->session->age);
  93. resp_hdr_ptr->ttt = cqe_text_response->ttt;
  94. resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn);
  95. resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn);
  96. resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn);
  97. pld_len = cqe_text_response->hdr_second_dword &
  98. ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK;
  99. qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
  100. memset(task_ctx, '\0', sizeof(*task_ctx));
  101. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
  102. "Freeing tid=0x%x for cid=0x%x\n",
  103. cmd->task_id, qedi_conn->iscsi_conn_id);
  104. spin_lock(&qedi_conn->list_lock);
  105. if (likely(cmd->io_cmd_in_list)) {
  106. cmd->io_cmd_in_list = false;
  107. list_del_init(&cmd->io_cmd);
  108. qedi_conn->active_cmd_count--;
  109. } else {
  110. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
  111. "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
  112. cmd->task_id, qedi_conn->iscsi_conn_id,
  113. &cmd->io_cmd);
  114. }
  115. spin_unlock(&qedi_conn->list_lock);
  116. cmd->state = RESPONSE_RECEIVED;
  117. __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
  118. qedi_conn->gen_pdu.resp_buf,
  119. (qedi_conn->gen_pdu.resp_wr_ptr -
  120. qedi_conn->gen_pdu.resp_buf));
  121. spin_unlock(&session->back_lock);
  122. }
  123. static void qedi_tmf_resp_work(struct work_struct *work)
  124. {
  125. struct qedi_cmd *qedi_cmd =
  126. container_of(work, struct qedi_cmd, tmf_work);
  127. struct qedi_conn *qedi_conn = qedi_cmd->conn;
  128. struct qedi_ctx *qedi = qedi_conn->qedi;
  129. struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
  130. struct iscsi_session *session = conn->session;
  131. struct iscsi_tm_rsp *resp_hdr_ptr;
  132. int rval = 0;
  133. resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
  134. rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
  135. if (rval)
  136. goto exit_tmf_resp;
  137. spin_lock(&session->back_lock);
  138. __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
  139. spin_unlock(&session->back_lock);
  140. exit_tmf_resp:
  141. kfree(resp_hdr_ptr);
  142. spin_lock(&qedi_conn->tmf_work_lock);
  143. qedi_conn->fw_cleanup_works--;
  144. spin_unlock(&qedi_conn->tmf_work_lock);
  145. }
/*
 * qedi_process_tmf_resp - handle a firmware task-management response CQE
 * @qedi:	adapter context
 * @cqe:	completion queue entry carrying the TMF response header
 * @task:	iSCSI task the TMF was issued on
 * @qedi_conn:	per-connection driver state
 *
 * Copies the firmware response into a freshly allocated iscsi_tm_rsp and
 * unlinks the command from the active list.  For LUN/target reset
 * functions the completion is deferred to qedi_tmf_resp_work() so that
 * outstanding I/O can be cleaned up first; everything else is completed
 * immediately.  The session back_lock is held from the header fill until
 * the function returns.
 */
static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
				  union iscsi_cqe *cqe,
				  struct iscsi_task *task,
				  struct qedi_conn *qedi_conn)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tmf_response_hdr *cqe_tmp_response;
	struct iscsi_tm_rsp *resp_hdr_ptr;
	struct iscsi_tm *tmf_hdr;
	struct qedi_cmd *qedi_cmd = NULL;

	cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
	qedi_cmd = task->dd_data;
	/* Freed on the immediate-completion path below, or by
	 * qedi_tmf_resp_work() when completion is deferred.
	 */
	qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_ATOMIC);
	if (!qedi_cmd->tmf_resp_buf) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Failed to allocate resp buf, cid=0x%x\n",
			 qedi_conn->iscsi_conn_id);
		return;
	}
	spin_lock(&session->back_lock);
	resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
	memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));
	/* Fill up the header */
	resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
	resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
	resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
	resp_hdr_ptr->hlength = 0;
	hton24(resp_hdr_ptr->dlength,
	       (cqe_tmp_response->hdr_second_dword &
		ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
	resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
				      conn->session->age);
	resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
	resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
	resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);
	tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
	/* Unlink from the active command list before completing. */
	spin_lock(&qedi_conn->list_lock);
	if (likely(qedi_cmd->io_cmd_in_list)) {
		qedi_cmd->io_cmd_in_list = false;
		list_del_init(&qedi_cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	}
	spin_unlock(&qedi_conn->list_lock);
	spin_lock(&qedi_conn->tmf_work_lock);
	switch (tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
		if (qedi_conn->ep_disconnect_starting) {
			/* Session is down so ep_disconnect will clean up */
			spin_unlock(&qedi_conn->tmf_work_lock);
			goto unblock_sess;
		}
		/* Defer: outstanding I/O must be cleaned up before the
		 * reset response is surfaced.  fw_cleanup_works is
		 * dropped again inside qedi_tmf_resp_work().
		 */
		qedi_conn->fw_cleanup_works++;
		spin_unlock(&qedi_conn->tmf_work_lock);
		INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
		queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
		goto unblock_sess;
	}
	spin_unlock(&qedi_conn->tmf_work_lock);
	/* Immediate completion path: deliver the PDU and free the buffer. */
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
	kfree(resp_hdr_ptr);
unblock_sess:
	spin_unlock(&session->back_lock);
}
  212. static void qedi_process_login_resp(struct qedi_ctx *qedi,
  213. union iscsi_cqe *cqe,
  214. struct iscsi_task *task,
  215. struct qedi_conn *qedi_conn)
  216. {
  217. struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
  218. struct iscsi_session *session = conn->session;
  219. struct iscsi_task_context *task_ctx;
  220. struct iscsi_login_rsp *resp_hdr_ptr;
  221. struct iscsi_login_response_hdr *cqe_login_response;
  222. struct qedi_cmd *cmd;
  223. int pld_len;
  224. cmd = (struct qedi_cmd *)task->dd_data;
  225. cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response;
  226. task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
  227. spin_lock(&session->back_lock);
  228. resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr;
  229. memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp));
  230. resp_hdr_ptr->opcode = cqe_login_response->opcode;
  231. resp_hdr_ptr->flags = cqe_login_response->flags_attr;
  232. resp_hdr_ptr->hlength = 0;
  233. hton24(resp_hdr_ptr->dlength,
  234. (cqe_login_response->hdr_second_dword &
  235. ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
  236. resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
  237. conn->session->age);
  238. resp_hdr_ptr->tsih = cqe_login_response->tsih;
  239. resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn);
  240. resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn);
  241. resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn);
  242. resp_hdr_ptr->status_class = cqe_login_response->status_class;
  243. resp_hdr_ptr->status_detail = cqe_login_response->status_detail;
  244. pld_len = cqe_login_response->hdr_second_dword &
  245. ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
  246. qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
  247. spin_lock(&qedi_conn->list_lock);
  248. if (likely(cmd->io_cmd_in_list)) {
  249. cmd->io_cmd_in_list = false;
  250. list_del_init(&cmd->io_cmd);
  251. qedi_conn->active_cmd_count--;
  252. }
  253. spin_unlock(&qedi_conn->list_lock);
  254. memset(task_ctx, '\0', sizeof(*task_ctx));
  255. __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
  256. qedi_conn->gen_pdu.resp_buf,
  257. (qedi_conn->gen_pdu.resp_wr_ptr -
  258. qedi_conn->gen_pdu.resp_buf));
  259. spin_unlock(&session->back_lock);
  260. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
  261. "Freeing tid=0x%x for cid=0x%x\n",
  262. cmd->task_id, qedi_conn->iscsi_conn_id);
  263. cmd->state = RESPONSE_RECEIVED;
  264. }
  265. static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
  266. struct iscsi_cqe_unsolicited *cqe,
  267. char *ptr, int len)
  268. {
  269. u16 idx = 0;
  270. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
  271. "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n",
  272. len, qedi->bdq_prod_idx,
  273. (qedi->bdq_prod_idx % qedi->rq_num_entries));
  274. /* Obtain buffer address from rqe_opaque */
  275. idx = cqe->rqe_opaque;
  276. if (idx > (QEDI_BDQ_NUM - 1)) {
  277. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
  278. "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
  279. idx);
  280. return;
  281. }
  282. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
  283. "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
  284. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
  285. "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
  286. switch (cqe->unsol_cqe_type) {
  287. case ISCSI_CQE_UNSOLICITED_SINGLE:
  288. case ISCSI_CQE_UNSOLICITED_FIRST:
  289. if (len)
  290. memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len);
  291. break;
  292. case ISCSI_CQE_UNSOLICITED_MIDDLE:
  293. case ISCSI_CQE_UNSOLICITED_LAST:
  294. break;
  295. default:
  296. break;
  297. }
  298. }
/*
 * qedi_put_rq_bdq_buf - return consumed BDQ buffer(s) to the firmware
 * @qedi:	adapter context
 * @cqe:	unsolicited CQE naming the buffer (via rqe_opaque)
 * @count:	number of BDQ buffers consumed by the PDU
 *
 * Re-posts the buffer's DMA address on the buffer-descriptor queue PBL
 * at the current producer slot, then advances the producer index by
 * @count and rings both producer doorbells.
 */
static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
				struct iscsi_cqe_unsolicited *cqe,
				int count)
{
	u16 idx = 0;
	struct scsi_bd *pbl;

	/* Obtain buffer address from rqe_opaque */
	idx = cqe->rqe_opaque;
	if (idx > (QEDI_BDQ_NUM - 1)) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
			  "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
			  idx);
		return;
	}

	/* Repost the buffer's DMA address at the current PBL producer slot. */
	pbl = (struct scsi_bd *)qedi->bdq_pbl;
	pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries);
	pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
	pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
		  pbl, pbl->address.hi, pbl->address.lo, idx);
	pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
	pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
	pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
	pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);

	/* Increment producer to let f/w know we've handled the frame */
	qedi->bdq_prod_idx += count;

	/* NOTE(review): the readw() after each writew() presumably flushes
	 * the posted MMIO doorbell write — common device-I/O ordering
	 * idiom; confirm against the adapter programming guide.
	 */
	writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
	readw(qedi->bdq_primary_prod);
	writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
	readw(qedi->bdq_secondary_prod);
}
  331. static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi,
  332. struct iscsi_cqe_unsolicited *cqe,
  333. u32 pdu_len, u32 num_bdqs,
  334. char *bdq_data)
  335. {
  336. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
  337. "num_bdqs [%d]\n", num_bdqs);
  338. qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len);
  339. qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1));
  340. }
/*
 * qedi_process_nopin_mesg - handle a NOP-In CQE (solicited or unsolicited)
 * @qedi:	adapter context
 * @cqe:	completion queue entry carrying the NOP-In header
 * @task:	our NOP-Out task when this is a solicited response, else NULL
 * @qedi_conn:	per-connection driver state
 * @que_idx:	completion queue index (unused here)
 *
 * Unsolicited (target-initiated) NOP-Ins are pulled out of the BDQ and
 * marked with RESERVED_ITT; solicited ones complete our NOP-Out task and
 * unlink it from the active list.  Either way the PDU is delivered to
 * libiscsi under the session back_lock.
 *
 * Return: 1 if this was a target-initiated (async) NOP, 0 otherwise.
 */
static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
				   union iscsi_cqe *cqe,
				   struct iscsi_task *task,
				   struct qedi_conn *qedi_conn, u16 que_idx)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_nop_in_hdr *cqe_nop_in;
	struct iscsi_nopin *hdr;
	struct qedi_cmd *cmd;
	int tgt_async_nop = 0;
	u32 lun[2];
	u32 pdu_len, num_bdqs;
	char bdq_data[QEDI_BDQ_BUF_SIZE];
	unsigned long flags;

	spin_lock_bh(&session->back_lock);
	cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in;
	pdu_len = cqe_nop_in->hdr_second_dword &
		  ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK;
	num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;

	/* Build the NOP-In header in the per-connection PDU buffer. */
	hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr;
	memset(hdr, 0, sizeof(struct iscsi_hdr));
	hdr->opcode = cqe_nop_in->opcode;
	hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn);
	hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn);
	hdr->ttt = cpu_to_be32(cqe_nop_in->ttt);

	if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
		/* Target-initiated NOP: pull payload from the BDQ under
		 * hba_lock and return the buffers to firmware.
		 */
		spin_lock_irqsave(&qedi->hba_lock, flags);
		qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
					  pdu_len, num_bdqs, bdq_data);
		hdr->itt = RESERVED_ITT;
		tgt_async_nop = 1;
		spin_unlock_irqrestore(&qedi->hba_lock, flags);
		goto done;
	}

	/* Response to one of our nop-outs */
	if (task) {
		cmd = task->dd_data;
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
		hdr->itt = build_itt(cqe->cqe_solicited.itid,
				     conn->session->age);
		lun[0] = 0xffffffff;
		lun[1] = 0xffffffff;
		memcpy(&hdr->lun, lun, sizeof(struct scsi_lun));
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
			  "Freeing tid=0x%x for cid=0x%x\n",
			  cmd->task_id, qedi_conn->iscsi_conn_id);
		cmd->state = RESPONSE_RECEIVED;
		/* Unlink from the active command list if still queued. */
		spin_lock(&qedi_conn->list_lock);
		if (likely(cmd->io_cmd_in_list)) {
			cmd->io_cmd_in_list = false;
			list_del_init(&cmd->io_cmd);
			qedi_conn->active_cmd_count--;
		}
		spin_unlock(&qedi_conn->list_lock);
	}
done:
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len);
	spin_unlock_bh(&session->back_lock);
	return tgt_async_nop;
}
  403. static void qedi_process_async_mesg(struct qedi_ctx *qedi,
  404. union iscsi_cqe *cqe,
  405. struct iscsi_task *task,
  406. struct qedi_conn *qedi_conn,
  407. u16 que_idx)
  408. {
  409. struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
  410. struct iscsi_session *session = conn->session;
  411. struct iscsi_async_msg_hdr *cqe_async_msg;
  412. struct iscsi_async *resp_hdr;
  413. u32 lun[2];
  414. u32 pdu_len, num_bdqs;
  415. char bdq_data[QEDI_BDQ_BUF_SIZE];
  416. unsigned long flags;
  417. spin_lock_bh(&session->back_lock);
  418. cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg;
  419. pdu_len = cqe_async_msg->hdr_second_dword &
  420. ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK;
  421. num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
  422. if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
  423. spin_lock_irqsave(&qedi->hba_lock, flags);
  424. qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
  425. pdu_len, num_bdqs, bdq_data);
  426. spin_unlock_irqrestore(&qedi->hba_lock, flags);
  427. }
  428. resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr;
  429. memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
  430. resp_hdr->opcode = cqe_async_msg->opcode;
  431. resp_hdr->flags = 0x80;
  432. lun[0] = cpu_to_be32(cqe_async_msg->lun.lo);
  433. lun[1] = cpu_to_be32(cqe_async_msg->lun.hi);
  434. memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun));
  435. resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn);
  436. resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn);
  437. resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn);
  438. resp_hdr->async_event = cqe_async_msg->async_event;
  439. resp_hdr->async_vcode = cqe_async_msg->async_vcode;
  440. resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv);
  441. resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv);
  442. resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv);
  443. __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data,
  444. pdu_len);
  445. spin_unlock_bh(&session->back_lock);
  446. }
  447. static void qedi_process_reject_mesg(struct qedi_ctx *qedi,
  448. union iscsi_cqe *cqe,
  449. struct iscsi_task *task,
  450. struct qedi_conn *qedi_conn,
  451. uint16_t que_idx)
  452. {
  453. struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
  454. struct iscsi_session *session = conn->session;
  455. struct iscsi_reject_hdr *cqe_reject;
  456. struct iscsi_reject *hdr;
  457. u32 pld_len, num_bdqs;
  458. unsigned long flags;
  459. spin_lock_bh(&session->back_lock);
  460. cqe_reject = &cqe->cqe_common.iscsi_hdr.reject;
  461. pld_len = cqe_reject->hdr_second_dword &
  462. ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK;
  463. num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE;
  464. if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
  465. spin_lock_irqsave(&qedi->hba_lock, flags);
  466. qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
  467. pld_len, num_bdqs, conn->data);
  468. spin_unlock_irqrestore(&qedi->hba_lock, flags);
  469. }
  470. hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr;
  471. memset(hdr, 0, sizeof(struct iscsi_hdr));
  472. hdr->opcode = cqe_reject->opcode;
  473. hdr->reason = cqe_reject->hdr_reason;
  474. hdr->flags = cqe_reject->hdr_flags;
  475. hton24(hdr->dlength, (cqe_reject->hdr_second_dword &
  476. ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK));
  477. hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn);
  478. hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn);
  479. hdr->statsn = cpu_to_be32(cqe_reject->stat_sn);
  480. hdr->ffffffff = cpu_to_be32(0xffffffff);
  481. __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
  482. conn->data, pld_len);
  483. spin_unlock_bh(&session->back_lock);
  484. }
/*
 * qedi_scsi_completion - complete a SCSI command from a firmware CQE
 * @qedi:	adapter context
 * @cqe:	completion queue entry (SCSI response / final Data-In)
 * @task:	iSCSI task for the command
 * @conn:	libiscsi connection
 *
 * Validates that the SCSI command is still live, unmaps its SGL, builds
 * the iscsi_scsi_rsp in the task header from the CQE, copies sense data
 * on CHECK CONDITION, patches up underrun residuals reported by the
 * firmware, unlinks the command from the active list, and completes the
 * PDU.  The session back_lock is held for the whole sequence.
 */
static void qedi_scsi_completion(struct qedi_ctx *qedi,
				 union iscsi_cqe *cqe,
				 struct iscsi_task *task,
				 struct iscsi_conn *conn)
{
	struct scsi_cmnd *sc_cmd;
	struct qedi_cmd *cmd = task->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_scsi_rsp *hdr;
	struct iscsi_data_in_hdr *cqe_data_in;
	int datalen = 0;
	struct qedi_conn *qedi_conn;
	u32 iscsi_cid;
	u8 cqe_err_bits = 0;

	/* Look up the driver connection from the CQE's connection id. */
	iscsi_cid = cqe->cqe_common.conn_id;
	qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
	cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in;
	cqe_err_bits =
		cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;

	spin_lock_bh(&session->back_lock);
	/* get the scsi command */
	sc_cmd = cmd->scsi_cmd;
	if (!sc_cmd) {
		QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n");
		goto error;
	}
	if (!iscsi_cmd(sc_cmd)->task) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "NULL task pointer, returned in another context.\n");
		goto error;
	}
	if (!scsi_cmd_to_rq(sc_cmd)->q) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "request->q is NULL so request is not valid, sc_cmd=%p.\n",
			  sc_cmd);
		goto error;
	}

	qedi_iscsi_unmap_sg_list(cmd);

	/* Build the SCSI response header from the CQE. */
	hdr = (struct iscsi_scsi_rsp *)task->hdr;
	hdr->opcode = cqe_data_in->opcode;
	hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn);
	hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
	hdr->response = cqe_data_in->reserved1;
	hdr->cmd_status = cqe_data_in->status_rsvd;
	hdr->flags = cqe_data_in->flags;
	hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count);

	/* On CHECK CONDITION, hand the sense buffer up as PDU payload. */
	if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
		datalen = cqe_data_in->reserved2 &
			  ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK;
		memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen);
	}

	/* If f/w reports data underrun err then set residual to IO transfer
	 * length, set Underrun flag and clear Overrun flag explicitly
	 */
	if (unlikely(cqe_err_bits &&
		     GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n",
			  hdr->itt, cqe_data_in->flags, cmd->task_id,
			  qedi_conn->iscsi_conn_id, hdr->residual_count,
			  scsi_bufflen(sc_cmd));
		hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd));
		hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
		hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW);
	}

	/* Unlink from the active command list if still queued. */
	spin_lock(&qedi_conn->list_lock);
	if (likely(cmd->io_cmd_in_list)) {
		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	}
	spin_unlock(&qedi_conn->list_lock);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
		  "Freeing tid=0x%x for cid=0x%x\n",
		  cmd->task_id, qedi_conn->iscsi_conn_id);
	cmd->state = RESPONSE_RECEIVED;
	if (qedi_io_tracing)
		qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
			     conn->data, datalen);
error:
	spin_unlock_bh(&session->back_lock);
}
  569. static void qedi_mtask_completion(struct qedi_ctx *qedi,
  570. union iscsi_cqe *cqe,
  571. struct iscsi_task *task,
  572. struct qedi_conn *conn, uint16_t que_idx)
  573. {
  574. struct iscsi_conn *iscsi_conn;
  575. u32 hdr_opcode;
  576. hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
  577. iscsi_conn = conn->cls_conn->dd_data;
  578. switch (hdr_opcode) {
  579. case ISCSI_OPCODE_SCSI_RESPONSE:
  580. case ISCSI_OPCODE_DATA_IN:
  581. qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
  582. break;
  583. case ISCSI_OPCODE_LOGIN_RESPONSE:
  584. qedi_process_login_resp(qedi, cqe, task, conn);
  585. break;
  586. case ISCSI_OPCODE_TMF_RESPONSE:
  587. qedi_process_tmf_resp(qedi, cqe, task, conn);
  588. break;
  589. case ISCSI_OPCODE_TEXT_RESPONSE:
  590. qedi_process_text_resp(qedi, cqe, task, conn);
  591. break;
  592. case ISCSI_OPCODE_LOGOUT_RESPONSE:
  593. qedi_process_logout_resp(qedi, cqe, task, conn);
  594. break;
  595. case ISCSI_OPCODE_NOP_IN:
  596. qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
  597. break;
  598. default:
  599. QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n");
  600. }
  601. }
  602. static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
  603. struct iscsi_cqe_solicited *cqe,
  604. struct iscsi_task *task,
  605. struct qedi_conn *qedi_conn)
  606. {
  607. struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
  608. struct iscsi_session *session = conn->session;
  609. struct qedi_cmd *cmd = task->dd_data;
  610. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL,
  611. "itid=0x%x, cmd task id=0x%x\n",
  612. cqe->itid, cmd->task_id);
  613. cmd->state = RESPONSE_RECEIVED;
  614. spin_lock_bh(&session->back_lock);
  615. __iscsi_put_task(task);
  616. spin_unlock_bh(&session->back_lock);
  617. }
/*
 * qedi_process_cmd_cleanup_resp - handle a TASK_CLEANUP completion CQE.
 * @qedi:	adapter context
 * @cqe:	solicited CQE carrying the cleaned-up task id and conn id
 * @conn:	upper-layer iSCSI connection the CQE arrived on
 *
 * Two different waiters may be interested in a cleanup completion: an
 * ABORT_TASK TMF tracked on qedi_conn->tmf_work_list, or a bulk
 * connection cleanup tracked via the cmd_cleanup_req/cmd_cleanup_cmpl
 * counters.  The TMF work list is checked first; if no entry matches,
 * the CQE is accounted against the bulk cleanup and the waiter is woken
 * once all requested cleanups have completed.
 */
static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
					  struct iscsi_cqe_solicited *cqe,
					  struct iscsi_conn *conn)
{
	struct qedi_work_map *work, *work_tmp;
	u32 proto_itt = cqe->itid;
	int found = 0;
	struct qedi_cmd *qedi_cmd = NULL;
	u32 iscsi_cid;
	struct qedi_conn *qedi_conn;
	struct qedi_cmd *dbg_cmd;
	struct iscsi_task *mtask, *task;
	struct iscsi_tm *tmf_hdr = NULL;

	iscsi_cid = cqe->conn_id;
	qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
	if (!qedi_conn) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "icid not found 0x%x\n", cqe->conn_id);
		return;
	}

	/* Based on this itt get the corresponding qedi_cmd */
	spin_lock_bh(&qedi_conn->tmf_work_lock);
	list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list,
				 list) {
		if (work->rtid == proto_itt) {
			/* We found the command */
			qedi_cmd = work->qedi_cmd;
			if (!qedi_cmd->list_tmf_work) {
				QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
					  "TMF work not found, cqe->tid=0x%x, cid=0x%x\n",
					  proto_itt, qedi_conn->iscsi_conn_id);
				WARN_ON(1);
			}
			found = 1;
			mtask = qedi_cmd->task;
			task = work->ctask;
			tmf_hdr = (struct iscsi_tm *)mtask->hdr;

			/* The work entry is consumed here; the waiter only
			 * needs qedi_cmd->state and the wait-queue wakeup.
			 */
			list_del_init(&work->list);
			kfree(work);
			qedi_cmd->list_tmf_work = NULL;
		}
	}
	spin_unlock_bh(&qedi_conn->tmf_work_lock);

	if (!found)
		goto check_cleanup_reqs;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
		  "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
		  proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);

	/* back_lock keeps the aborted task stable while it is inspected
	 * and unlinked from the active command list.
	 */
	spin_lock_bh(&conn->session->back_lock);
	if (iscsi_task_is_completed(task)) {
		QEDI_NOTICE(&qedi->dbg_ctx,
			    "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
			    get_itt(tmf_hdr->rtt), qedi_conn->iscsi_conn_id);
		goto unlock;
	}

	dbg_cmd = task->dd_data;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
		  "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
		  get_itt(tmf_hdr->rtt), get_itt(task->itt), dbg_cmd->task_id,
		  qedi_conn->iscsi_conn_id);

	spin_lock(&qedi_conn->list_lock);
	if (likely(dbg_cmd->io_cmd_in_list)) {
		dbg_cmd->io_cmd_in_list = false;
		list_del_init(&dbg_cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	}
	spin_unlock(&qedi_conn->list_lock);
	qedi_cmd->state = CLEANUP_RECV;
unlock:
	spin_unlock_bh(&conn->session->back_lock);
	wake_up_interruptible(&qedi_conn->wait_queue);
	return;

check_cleanup_reqs:
	/* Bulk cleanup path: count this completion and wake the waiter in
	 * qedi_cleanup_all_io() once every requested cleanup has arrived.
	 */
	if (atomic_inc_return(&qedi_conn->cmd_cleanup_cmpl) ==
	    qedi_conn->cmd_cleanup_req) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
			  "Freeing tid=0x%x for cid=0x%x\n",
			  cqe->itid, qedi_conn->iscsi_conn_id);
		wake_up(&qedi_conn->wait_queue);
	}
}
/*
 * qedi_fp_process_cqes - fast-path handler for one firmware CQE.
 * @work: per-CQE work item carrying the adapter pointer, a copy of the
 *	  CQE and the completion-queue index it arrived on.
 *
 * Validates the CQE type and its connection, fails the connection on a
 * data digest error, then fans out by completion type:
 *  - SOLICITED (with or without sense): either a local NOP-Out
 *    completion or a SCSI/middle-path completion via
 *    qedi_mtask_completion();
 *  - UNSOLICITED: target-initiated NOP-In, async message or reject;
 *  - DUMMY: logged and ignored;
 *  - TASK_CLEANUP: routed to qedi_process_cmd_cleanup_resp().
 */
void qedi_fp_process_cqes(struct qedi_work *work)
{
	struct qedi_ctx *qedi = work->qedi;
	union iscsi_cqe *cqe = &work->cqe;
	struct iscsi_task *task = NULL;
	struct iscsi_nopout *nopout_hdr;
	struct qedi_conn *q_conn;
	struct iscsi_conn *conn;
	struct qedi_cmd *qedi_cmd;
	u32 comp_type;
	u32 iscsi_cid;
	u32 hdr_opcode;
	u16 que_idx = work->que_idx;
	u8 cqe_err_bits = 0;

	comp_type = cqe->cqe_common.cqe_type;
	hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
	cqe_err_bits =
		cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n",
		  cqe->cqe_common.conn_id, comp_type, hdr_opcode);

	if (comp_type >= MAX_ISCSI_CQES_TYPE) {
		QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n");
		return;
	}

	/* Map the firmware connection id back to the driver connection. */
	iscsi_cid  = cqe->cqe_common.conn_id;
	q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
	if (!q_conn) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Session no longer exists for cid=0x%x!!\n",
			  iscsi_cid);
		return;
	}

	conn = q_conn->cls_conn->dd_data;

	/* A data digest error is fatal for the whole connection. */
	if (unlikely(cqe_err_bits &&
		     GET_FIELD(cqe_err_bits,
			       CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) {
		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
		return;
	}

	switch (comp_type) {
	case ISCSI_CQE_TYPE_SOLICITED:
	case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
		/* Solicited CQEs are queued on the qedi_cmd's own work
		 * item, so the command can be recovered via container_of.
		 */
		qedi_cmd = container_of(work, struct qedi_cmd, cqe_work);
		task = qedi_cmd->task;
		if (!task) {
			QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n");
			return;
		}

		/* Process NOPIN local completion */
		nopout_hdr = (struct iscsi_nopout *)task->hdr;
		if ((nopout_hdr->itt == RESERVED_ITT) &&
		    (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
			qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
						      task, q_conn);
		} else {
			cqe->cqe_solicited.itid =
					       qedi_get_itt(cqe->cqe_solicited);
			/* Process other solicited responses */
			qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
		}
		break;
	case ISCSI_CQE_TYPE_UNSOLICITED:
		switch (hdr_opcode) {
		case ISCSI_OPCODE_NOP_IN:
			qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
						que_idx);
			break;
		case ISCSI_OPCODE_ASYNC_MSG:
			qedi_process_async_mesg(qedi, cqe, task, q_conn,
						que_idx);
			break;
		case ISCSI_OPCODE_REJECT:
			qedi_process_reject_mesg(qedi, cqe, task, q_conn,
						 que_idx);
			break;
		}
		goto exit_fp_process;
	case ISCSI_CQE_TYPE_DUMMY:
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n");
		goto exit_fp_process;
	case ISCSI_CQE_TYPE_TASK_CLEANUP:
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
		qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, conn);
		goto exit_fp_process;
	default:
		QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
		break;
	}

exit_fp_process:
	return;
}
/* Publish the current SQ producer index through the connection's
 * doorbell register so the firmware starts consuming newly queued WQEs.
 * The barrier placement around writel() is deliberate; do not reorder.
 */
static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
{
	qedi_conn->ep->db_data.sq_prod = qedi_conn->ep->fw_sq_prod_idx;

	/* wmb - Make sure fw idx is coherent */
	wmb();
	writel(*(u32 *)&qedi_conn->ep->db_data, qedi_conn->ep->p_doorbell);

	/* Make sure fw write idx is coherent, and include both memory barriers
	 * as a failsafe as for some architectures the call is the same but on
	 * others they are two different assembly operations.
	 */
	wmb();
	QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
		  "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
		  qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
		  qedi_conn->iscsi_conn_id);
}
  807. static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn)
  808. {
  809. struct qedi_endpoint *ep;
  810. u16 rval;
  811. ep = qedi_conn->ep;
  812. rval = ep->sq_prod_idx;
  813. /* Increament SQ index */
  814. ep->sq_prod_idx++;
  815. ep->fw_sq_prod_idx++;
  816. if (ep->sq_prod_idx == QEDI_SQ_SIZE)
  817. ep->sq_prod_idx = 0;
  818. return rval;
  819. }
  820. int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
  821. struct iscsi_task *task)
  822. {
  823. struct iscsi_login_req_hdr login_req_pdu_header;
  824. struct scsi_sgl_task_params tx_sgl_task_params;
  825. struct scsi_sgl_task_params rx_sgl_task_params;
  826. struct iscsi_task_params task_params;
  827. struct iscsi_task_context *fw_task_ctx;
  828. struct qedi_ctx *qedi = qedi_conn->qedi;
  829. struct iscsi_login_req *login_hdr;
  830. struct scsi_sge *resp_sge = NULL;
  831. struct qedi_cmd *qedi_cmd;
  832. struct qedi_endpoint *ep;
  833. s16 tid = 0;
  834. u16 sq_idx = 0;
  835. int rval = 0;
  836. resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
  837. qedi_cmd = (struct qedi_cmd *)task->dd_data;
  838. ep = qedi_conn->ep;
  839. login_hdr = (struct iscsi_login_req *)task->hdr;
  840. tid = qedi_get_task_idx(qedi);
  841. if (tid == -1)
  842. return -ENOMEM;
  843. fw_task_ctx =
  844. (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
  845. tid);
  846. memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
  847. qedi_cmd->task_id = tid;
  848. memset(&task_params, 0, sizeof(task_params));
  849. memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header));
  850. memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
  851. memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
  852. /* Update header info */
  853. login_req_pdu_header.opcode = login_hdr->opcode;
  854. login_req_pdu_header.version_min = login_hdr->min_version;
  855. login_req_pdu_header.version_max = login_hdr->max_version;
  856. login_req_pdu_header.flags_attr = login_hdr->flags;
  857. login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid);
  858. login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]);
  859. login_req_pdu_header.tsih = login_hdr->tsih;
  860. login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength);
  861. qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
  862. login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
  863. login_req_pdu_header.cid = qedi_conn->iscsi_conn_id;
  864. login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn);
  865. login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
  866. login_req_pdu_header.exp_stat_sn = 0;
  867. /* Fill tx AHS and rx buffer */
  868. tx_sgl_task_params.sgl =
  869. (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
  870. tx_sgl_task_params.sgl_phys_addr.lo =
  871. (u32)(qedi_conn->gen_pdu.req_dma_addr);
  872. tx_sgl_task_params.sgl_phys_addr.hi =
  873. (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
  874. tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength);
  875. tx_sgl_task_params.num_sges = 1;
  876. rx_sgl_task_params.sgl =
  877. (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
  878. rx_sgl_task_params.sgl_phys_addr.lo =
  879. (u32)(qedi_conn->gen_pdu.resp_dma_addr);
  880. rx_sgl_task_params.sgl_phys_addr.hi =
  881. (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
  882. rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
  883. rx_sgl_task_params.num_sges = 1;
  884. /* Fill fw input params */
  885. task_params.context = fw_task_ctx;
  886. task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
  887. task_params.itid = tid;
  888. task_params.cq_rss_number = 0;
  889. task_params.tx_io_size = ntoh24(login_hdr->dlength);
  890. task_params.rx_io_size = resp_sge->sge_len;
  891. sq_idx = qedi_get_wqe_idx(qedi_conn);
  892. task_params.sqe = &ep->sq[sq_idx];
  893. memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
  894. rval = init_initiator_login_request_task(&task_params,
  895. &login_req_pdu_header,
  896. &tx_sgl_task_params,
  897. &rx_sgl_task_params);
  898. if (rval)
  899. return -1;
  900. spin_lock(&qedi_conn->list_lock);
  901. list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
  902. qedi_cmd->io_cmd_in_list = true;
  903. qedi_conn->active_cmd_count++;
  904. spin_unlock(&qedi_conn->list_lock);
  905. qedi_ring_doorbell(qedi_conn);
  906. return 0;
  907. }
/*
 * qedi_send_iscsi_logout - post an iSCSI Logout request to the firmware.
 * @qedi_conn:	connection being logged out
 * @task:	libiscsi task carrying the logout PDU
 *
 * Reserves a firmware task id, builds the logout PDU header, initializes
 * the firmware task context (no data phase, so no SGLs), links the
 * command onto the active list and rings the SQ doorbell.
 *
 * Return: 0 on success, -ENOMEM when no task id is free, -1 when the
 * firmware task-init helper fails.
 */
int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
			   struct iscsi_task *task)
{
	struct iscsi_logout_req_hdr logout_pdu_header;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;
	struct iscsi_task_params task_params;
	struct iscsi_task_context *fw_task_ctx;
	struct iscsi_logout *logout_hdr = NULL;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct qedi_cmd *qedi_cmd;
	struct qedi_endpoint *ep;
	s16 tid = 0;
	u16 sq_idx = 0;
	int rval = 0;

	qedi_cmd = (struct qedi_cmd *)task->dd_data;
	logout_hdr = (struct iscsi_logout *)task->hdr;
	ep = qedi_conn->ep;

	tid = qedi_get_task_idx(qedi);
	if (tid == -1)
		return -ENOMEM;

	fw_task_ctx =
	   (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
							  tid);
	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));

	qedi_cmd->task_id = tid;

	memset(&task_params, 0, sizeof(task_params));
	memset(&logout_pdu_header, 0, sizeof(logout_pdu_header));
	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));

	/* Update header info */
	logout_pdu_header.opcode = logout_hdr->opcode;
	/* NOTE(review): 0x80 appears to set a marker bit the firmware
	 * expects on the reason-code field - confirm against the HSI
	 * definition of iscsi_logout_req_hdr.
	 */
	logout_pdu_header.reason_code = 0x80 | logout_hdr->flags;
	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
	logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
	logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
	logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
	logout_pdu_header.cid = qedi_conn->iscsi_conn_id;

	/* Fill fw input params */
	task_params.context = fw_task_ctx;
	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
	task_params.itid = tid;
	task_params.cq_rss_number = 0;
	task_params.tx_io_size = 0;
	task_params.rx_io_size = 0;

	sq_idx = qedi_get_wqe_idx(qedi_conn);
	task_params.sqe = &ep->sq[sq_idx];
	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));

	rval = init_initiator_logout_request_task(&task_params,
						  &logout_pdu_header,
						  NULL, NULL);
	if (rval)
		return -1;

	spin_lock(&qedi_conn->list_lock);
	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
	qedi_cmd->io_cmd_in_list = true;
	qedi_conn->active_cmd_count++;
	spin_unlock(&qedi_conn->list_lock);

	qedi_ring_doorbell(qedi_conn);
	return 0;
}
/*
 * qedi_cleanup_all_io - issue firmware cleanups for outstanding I/O.
 * @qedi:	adapter context
 * @qedi_conn:	connection whose commands are being cleaned up
 * @task:	the TMF task that triggered the cleanup (NULL from
 *		recovery); it is skipped while walking the active list
 * @in_recovery: true when called from connection recovery
 *
 * For a LUN reset only commands on the matching LUN are cleaned; all
 * others are left alone.  One cleanup WQE is issued per command, then
 * the function waits (up to 5s) for the firmware to acknowledge them
 * all.  On timeout the hardware is drained and the wait is retried once
 * before reporting failure.
 *
 * Return: 0 when all cleanups completed (or recovery kicked in),
 * -1 when the second wait also times out.
 */
int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
			struct iscsi_task *task, bool in_recovery)
{
	int rval;
	struct iscsi_task *ctask;
	struct qedi_cmd *cmd, *cmd_tmp;
	struct iscsi_tm *tmf_hdr;
	unsigned int lun = 0;
	bool lun_reset = false;
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;

	/* From recovery, task is NULL or from tmf resp valid task */
	if (task) {
		tmf_hdr = (struct iscsi_tm *)task->hdr;

		if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
			ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) {
			lun_reset = true;
			lun = scsilun_to_int(&tmf_hdr->lun);
		}
	}

	qedi_conn->cmd_cleanup_req = 0;
	atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
		  "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
		  qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id,
		  in_recovery, lun_reset);

	/* NOTE(review): back_lock is taken only for the LUN-reset case,
	 * presumably to keep cmd->scsi_cmd stable while its LUN is read
	 * below - confirm against the libiscsi locking rules.
	 */
	if (lun_reset)
		spin_lock_bh(&session->back_lock);

	spin_lock(&qedi_conn->list_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
				 io_cmd) {
		ctask = cmd->task;
		if (ctask == task)
			continue;

		if (lun_reset) {
			if (cmd->scsi_cmd && cmd->scsi_cmd->device) {
				QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
					  "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n",
					  cmd->task_id, get_itt(ctask->itt),
					  cmd->scsi_cmd, cmd->scsi_cmd->device,
					  ctask->state, cmd->state,
					  qedi_conn->iscsi_conn_id);

				if (cmd->scsi_cmd->device->lun != lun)
					continue;
			}
		}
		qedi_conn->cmd_cleanup_req++;
		qedi_iscsi_cleanup_task(ctask, true);

		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
		QEDI_WARN(&qedi->dbg_ctx,
			  "Deleted active cmd list node io_cmd=%p, cid=0x%x\n",
			  &cmd->io_cmd, qedi_conn->iscsi_conn_id);
	}

	spin_unlock(&qedi_conn->list_lock);

	if (lun_reset)
		spin_unlock_bh(&session->back_lock);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
		  "cmd_cleanup_req=%d, cid=0x%x\n",
		  qedi_conn->cmd_cleanup_req,
		  qedi_conn->iscsi_conn_id);

	/* First wait: completions are counted by
	 * qedi_process_cmd_cleanup_resp(); recovery also ends the wait.
	 */
	rval  = wait_event_interruptible_timeout(qedi_conn->wait_queue,
						 (qedi_conn->cmd_cleanup_req ==
						 atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
						 test_bit(QEDI_IN_RECOVERY, &qedi->flags),
						 5 * HZ);
	if (rval) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
			  "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
			  qedi_conn->cmd_cleanup_req,
			  atomic_read(&qedi_conn->cmd_cleanup_cmpl),
			  qedi_conn->iscsi_conn_id);

		return 0;
	}

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
		  "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
		  qedi_conn->cmd_cleanup_req,
		  atomic_read(&qedi_conn->cmd_cleanup_cmpl),
		  qedi_conn->iscsi_conn_id);

	/* Timed out: quiesce all sessions, drain the hardware, and give
	 * the firmware one more 5s window to finish the cleanups.
	 */
	iscsi_host_for_each_session(qedi->shost,
				    qedi_mark_device_missing);
	qedi_ops->common->drain(qedi->cdev);

	/* Enable IOs for all other sessions except current.*/
	if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
					      (qedi_conn->cmd_cleanup_req ==
					       atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
					      test_bit(QEDI_IN_RECOVERY, &qedi->flags),
					      5 * HZ)) {
		iscsi_host_for_each_session(qedi->shost,
					    qedi_mark_device_available);
		return -1;
	}

	iscsi_host_for_each_session(qedi->shost,
				    qedi_mark_device_available);

	return 0;
}
  1066. void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
  1067. struct iscsi_task *task)
  1068. {
  1069. struct qedi_endpoint *qedi_ep;
  1070. int rval;
  1071. qedi_ep = qedi_conn->ep;
  1072. qedi_conn->cmd_cleanup_req = 0;
  1073. atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
  1074. if (!qedi_ep) {
  1075. QEDI_WARN(&qedi->dbg_ctx,
  1076. "Cannot proceed, ep already disconnected, cid=0x%x\n",
  1077. qedi_conn->iscsi_conn_id);
  1078. return;
  1079. }
  1080. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
  1081. "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n",
  1082. qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep);
  1083. qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle);
  1084. rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true);
  1085. if (rval) {
  1086. QEDI_ERR(&qedi->dbg_ctx,
  1087. "fatal error, need hard reset, cid=0x%x\n",
  1088. qedi_conn->iscsi_conn_id);
  1089. WARN_ON(1);
  1090. }
  1091. }
  1092. static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
  1093. struct qedi_conn *qedi_conn,
  1094. struct iscsi_task *task,
  1095. struct qedi_cmd *qedi_cmd,
  1096. struct qedi_work_map *list_work)
  1097. {
  1098. struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data;
  1099. int wait;
  1100. wait = wait_event_interruptible_timeout(qedi_conn->wait_queue,
  1101. ((qedi_cmd->state ==
  1102. CLEANUP_RECV) ||
  1103. ((qedi_cmd->type == TYPEIO) &&
  1104. (cmd->state ==
  1105. RESPONSE_RECEIVED))),
  1106. 5 * HZ);
  1107. if (!wait) {
  1108. qedi_cmd->state = CLEANUP_WAIT_FAILED;
  1109. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
  1110. "Cleanup timedout tid=0x%x, issue connection recovery, cid=0x%x\n",
  1111. cmd->task_id, qedi_conn->iscsi_conn_id);
  1112. return -1;
  1113. }
  1114. return 0;
  1115. }
/*
 * qedi_abort_work - workqueue handler for an ABORT_TASK TMF.
 *
 * Looks up the task referenced by the TMF's RTT.  If it already
 * completed, the TMF is still sent so libiscsi gets a response and does
 * not time out.  Otherwise a work-map entry is queued so the cleanup
 * completion (qedi_process_cmd_cleanup_resp) can find us, a firmware
 * task cleanup is issued for the aborted task, and we wait for its
 * acknowledgment before sending the TMF itself.  On timeout the work
 * entry and the command's active-list linkage are torn down instead.
 * In all cases the fw_cleanup_works count taken by qedi_send_iscsi_tmf()
 * is dropped on exit.
 */
static void qedi_abort_work(struct work_struct *work)
{
	struct qedi_cmd *qedi_cmd =
		container_of(work, struct qedi_cmd, tmf_work);
	struct qedi_conn *qedi_conn = qedi_cmd->conn;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct qedi_work_map *list_work = NULL;
	struct iscsi_task *mtask;
	struct qedi_cmd *cmd;
	struct iscsi_task *ctask;
	struct iscsi_tm *tmf_hdr;
	s16 rval = 0;

	mtask = qedi_cmd->task;
	tmf_hdr = (struct iscsi_tm *)mtask->hdr;

	spin_lock_bh(&conn->session->back_lock);
	ctask = iscsi_itt_to_ctask(conn, tmf_hdr->rtt);
	if (!ctask) {
		spin_unlock_bh(&conn->session->back_lock);
		QEDI_ERR(&qedi->dbg_ctx, "Invalid RTT. Letting abort timeout.\n");
		goto clear_cleanup;
	}

	if (iscsi_task_is_completed(ctask)) {
		spin_unlock_bh(&conn->session->back_lock);
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "Task already completed\n");
		/*
		 * We have to still send the TMF because libiscsi needs the
		 * response to avoid a timeout.
		 */
		goto send_tmf;
	}
	spin_unlock_bh(&conn->session->back_lock);

	cmd = (struct qedi_cmd *)ctask->dd_data;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
		  get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
		  qedi_conn->iscsi_conn_id);

	if (qedi_do_not_recover) {
		QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
			 qedi_do_not_recover);
		goto clear_cleanup;
	}

	/* Queue a work-map entry so the cleanup CQE handler can match
	 * this abort by the aborted task's firmware tid (rtid).
	 */
	list_work = kzalloc(sizeof(*list_work), GFP_NOIO);
	if (!list_work) {
		QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
		goto clear_cleanup;
	}

	qedi_cmd->type = TYPEIO;
	qedi_cmd->state = CLEANUP_WAIT;
	list_work->qedi_cmd = qedi_cmd;
	list_work->rtid = cmd->task_id;
	list_work->state = QEDI_WORK_SCHEDULED;
	list_work->ctask = ctask;
	qedi_cmd->list_tmf_work = list_work;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
		  "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n",
		  list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id,
		  tmf_hdr->flags);

	spin_lock_bh(&qedi_conn->tmf_work_lock);
	list_add_tail(&list_work->list, &qedi_conn->tmf_work_list);
	spin_unlock_bh(&qedi_conn->tmf_work_lock);

	qedi_iscsi_cleanup_task(ctask, false);

	rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd,
					     list_work);
	if (rval == -1) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "FW cleanup got escalated, cid=0x%x\n",
			  qedi_conn->iscsi_conn_id);
		goto ldel_exit;
	}

send_tmf:
	send_iscsi_tmf(qedi_conn, qedi_cmd->task, ctask);
	goto clear_cleanup;

ldel_exit:
	/* Timeout path: remove the work entry (if the CQE handler has not
	 * already consumed it) and unlink the command from the active list.
	 */
	spin_lock_bh(&qedi_conn->tmf_work_lock);
	if (qedi_cmd->list_tmf_work) {
		list_del_init(&list_work->list);
		qedi_cmd->list_tmf_work = NULL;
		kfree(list_work);
	}
	spin_unlock_bh(&qedi_conn->tmf_work_lock);

	spin_lock(&qedi_conn->list_lock);
	if (likely(cmd->io_cmd_in_list)) {
		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	}
	spin_unlock(&qedi_conn->list_lock);

clear_cleanup:
	spin_lock(&qedi_conn->tmf_work_lock);
	qedi_conn->fw_cleanup_works--;
	spin_unlock(&qedi_conn->tmf_work_lock);
}
/*
 * send_iscsi_tmf - build and post a TMF request to the firmware.
 * @qedi_conn:	connection the TMF is issued on
 * @mtask:	libiscsi management task carrying the TMF PDU
 * @ctask:	the referenced I/O task for ABORT_TASK (NULL for resets)
 *
 * For ABORT_TASK the RTT field is re-encoded with the referenced task's
 * firmware tid; for all other functions it is set to ISCSI_RESERVED_TAG.
 * The command is linked onto the active list and the doorbell is rung.
 *
 * Return: 0 on success, -ENODEV when the endpoint is gone, -ENOMEM when
 * no firmware task id is free.
 */
static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask,
			  struct iscsi_task *ctask)
{
	struct iscsi_tmf_request_hdr tmf_pdu_header;
	struct iscsi_task_params task_params;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct iscsi_task_context *fw_task_ctx;
	struct iscsi_tm *tmf_hdr;
	struct qedi_cmd *qedi_cmd;
	struct qedi_cmd *cmd;
	struct qedi_endpoint *ep;
	u32 scsi_lun[2];
	s16 tid = 0;
	u16 sq_idx = 0;

	tmf_hdr = (struct iscsi_tm *)mtask->hdr;
	qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
	ep = qedi_conn->ep;
	if (!ep)
		return -ENODEV;

	tid = qedi_get_task_idx(qedi);
	if (tid == -1)
		return -ENOMEM;

	fw_task_ctx =
	   (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
							  tid);
	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));

	qedi_cmd->task_id = tid;

	memset(&task_params, 0, sizeof(task_params));
	memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header));

	/* Update header info */
	qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
	tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt));
	tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);

	/* Copy the 8-byte LUN into two 32-bit words and convert each from
	 * wire (big-endian) order for the firmware header.
	 */
	memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
	tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
	tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);

	if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
	     ISCSI_TM_FUNC_ABORT_TASK) {
		cmd = (struct qedi_cmd *)ctask->dd_data;
		tmf_pdu_header.rtt =
				qedi_set_itt(cmd->task_id,
					     get_itt(tmf_hdr->rtt));
	} else {
		tmf_pdu_header.rtt = ISCSI_RESERVED_TAG;
	}

	tmf_pdu_header.opcode = tmf_hdr->opcode;
	tmf_pdu_header.function = tmf_hdr->flags;
	tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength);
	tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);

	/* Fill fw input params */
	task_params.context = fw_task_ctx;
	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
	task_params.itid = tid;
	task_params.cq_rss_number = 0;
	task_params.tx_io_size = 0;
	task_params.rx_io_size = 0;

	sq_idx = qedi_get_wqe_idx(qedi_conn);
	task_params.sqe = &ep->sq[sq_idx];

	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
	init_initiator_tmf_request_task(&task_params, &tmf_pdu_header);

	spin_lock(&qedi_conn->list_lock);
	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
	qedi_cmd->io_cmd_in_list = true;
	qedi_conn->active_cmd_count++;
	spin_unlock(&qedi_conn->list_lock);

	qedi_ring_doorbell(qedi_conn);
	return 0;
}
  1278. int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask)
  1279. {
  1280. struct iscsi_tm *tmf_hdr = (struct iscsi_tm *)mtask->hdr;
  1281. struct qedi_cmd *qedi_cmd = mtask->dd_data;
  1282. struct qedi_ctx *qedi = qedi_conn->qedi;
  1283. int rc = 0;
  1284. switch (tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
  1285. case ISCSI_TM_FUNC_ABORT_TASK:
  1286. spin_lock(&qedi_conn->tmf_work_lock);
  1287. qedi_conn->fw_cleanup_works++;
  1288. spin_unlock(&qedi_conn->tmf_work_lock);
  1289. INIT_WORK(&qedi_cmd->tmf_work, qedi_abort_work);
  1290. queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
  1291. break;
  1292. case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
  1293. case ISCSI_TM_FUNC_TARGET_WARM_RESET:
  1294. case ISCSI_TM_FUNC_TARGET_COLD_RESET:
  1295. rc = send_iscsi_tmf(qedi_conn, mtask, NULL);
  1296. break;
  1297. default:
  1298. QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
  1299. qedi_conn->iscsi_conn_id);
  1300. return -EINVAL;
  1301. }
  1302. return rc;
  1303. }
/*
 * qedi_send_iscsi_text - post an iSCSI Text request to the firmware.
 * @qedi_conn:	connection the text negotiation travels on
 * @task:	libiscsi task holding the text PDU and its data buffer
 *
 * Reserves a firmware task id, builds the text PDU header plus TX/RX
 * SGLs from the connection's generic-PDU DMA buffers, initializes the
 * firmware task context, links the command onto the active list and
 * rings the SQ doorbell.
 *
 * Return: 0 on success, -ENOMEM when no task id is free, -1 when the
 * firmware task-init helper fails.
 */
int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
			 struct iscsi_task *task)
{
	struct iscsi_text_request_hdr text_request_pdu_header;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;
	struct iscsi_task_params task_params;
	struct iscsi_task_context *fw_task_ctx;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct iscsi_text *text_hdr;
	struct scsi_sge *req_sge = NULL;
	struct scsi_sge *resp_sge = NULL;
	struct qedi_cmd *qedi_cmd;
	struct qedi_endpoint *ep;
	s16 tid = 0;
	u16 sq_idx = 0;
	int rval = 0;

	req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
	resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
	qedi_cmd = (struct qedi_cmd *)task->dd_data;
	text_hdr = (struct iscsi_text *)task->hdr;
	ep = qedi_conn->ep;

	tid = qedi_get_task_idx(qedi);
	if (tid == -1)
		return -ENOMEM;

	fw_task_ctx =
	   (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
							  tid);
	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));

	qedi_cmd->task_id = tid;

	memset(&task_params, 0, sizeof(task_params));
	memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header));
	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));

	/* Update header info */
	text_request_pdu_header.opcode = text_hdr->opcode;
	text_request_pdu_header.flags_attr = text_hdr->flags;

	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
	text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
	text_request_pdu_header.ttt = text_hdr->ttt;
	text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn);
	text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
	text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength);

	/* Fill tx AHS and rx buffer */
	tx_sgl_task_params.sgl =
			       (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
	tx_sgl_task_params.sgl_phys_addr.lo =
					 (u32)(qedi_conn->gen_pdu.req_dma_addr);
	tx_sgl_task_params.sgl_phys_addr.hi =
			      (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
	tx_sgl_task_params.total_buffer_size = req_sge->sge_len;
	tx_sgl_task_params.num_sges = 1;

	rx_sgl_task_params.sgl =
			      (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
	rx_sgl_task_params.sgl_phys_addr.lo =
					(u32)(qedi_conn->gen_pdu.resp_dma_addr);
	rx_sgl_task_params.sgl_phys_addr.hi =
			     (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
	rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
	rx_sgl_task_params.num_sges = 1;

	/* Fill fw input params */
	task_params.context = fw_task_ctx;
	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
	task_params.itid = tid;
	task_params.cq_rss_number = 0;
	task_params.tx_io_size = ntoh24(text_hdr->dlength);
	task_params.rx_io_size = resp_sge->sge_len;

	sq_idx = qedi_get_wqe_idx(qedi_conn);
	task_params.sqe = &ep->sq[sq_idx];

	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
	rval = init_initiator_text_request_task(&task_params,
						&text_request_pdu_header,
						&tx_sgl_task_params,
						&rx_sgl_task_params);
	if (rval)
		return -1;

	spin_lock(&qedi_conn->list_lock);
	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
	qedi_cmd->io_cmd_in_list = true;
	qedi_conn->active_cmd_count++;
	spin_unlock(&qedi_conn->list_lock);

	qedi_ring_doorbell(qedi_conn);
	return 0;
}
/*
 * qedi_send_iscsi_nopout - post an iSCSI NOP-Out PDU to the firmware.
 * @qedi_conn:	driver connection the PDU is sent on
 * @task:	libiscsi task backing this NOP-Out
 * @datap:	ping payload pointer (not referenced here; the payload is
 *		presumably already staged in gen_pdu.req_bd_tbl by the
 *		caller — TODO confirm)
 * @data_len:	length of the ping payload in bytes, 0 for none
 * @unsol:	unsolicited flag from libiscsi (not referenced here)
 *
 * Allocates a firmware task id, builds the NOP-Out PDU header and the
 * TX/RX SGL descriptors, initializes the firmware task context and rings
 * the connection doorbell.
 *
 * Return: 0 on success, -ENOMEM when no firmware task id is available,
 * -1 when firmware task initialization fails.
 */
int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
			   struct iscsi_task *task,
			   char *datap, int data_len, int unsol)
{
	struct iscsi_nop_out_hdr nop_out_pdu_header;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;
	struct iscsi_task_params task_params;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct iscsi_task_context *fw_task_ctx;
	struct iscsi_nopout *nopout_hdr;
	struct scsi_sge *resp_sge = NULL;
	struct qedi_cmd *qedi_cmd;
	struct qedi_endpoint *ep;
	u32 scsi_lun[2];
	s16 tid = 0;
	u16 sq_idx = 0;
	int rval = 0;

	resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
	qedi_cmd = (struct qedi_cmd *)task->dd_data;
	nopout_hdr = (struct iscsi_nopout *)task->hdr;
	ep = qedi_conn->ep;

	tid = qedi_get_task_idx(qedi);
	if (tid == -1)
		return -ENOMEM;

	fw_task_ctx =
	   (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
							  tid);
	/* Firmware task context must start zeroed before the init helper
	 * fills it in.
	 */
	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));

	qedi_cmd->task_id = tid;

	memset(&task_params, 0, sizeof(task_params));
	memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header));
	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));

	/* Update header info */
	nop_out_pdu_header.opcode = nopout_hdr->opcode;
	SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
	SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);

	/* LUN arrives in wire (big-endian) order; convert each half for
	 * the firmware header.
	 */
	memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
	nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
	nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
	nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
	nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);

	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);

	if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
		/* Response to a target NOP-In: echo the target's itt/ttt
		 * and do not track it on the active command list.
		 */
		nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt);
		nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt);
	} else {
		/* Initiator-originated ping: assign our own itt and track
		 * the command so the NOP-In reply can be matched.
		 */
		nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
		nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES;

		spin_lock(&qedi_conn->list_lock);
		list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
		qedi_cmd->io_cmd_in_list = true;
		qedi_conn->active_cmd_count++;
		spin_unlock(&qedi_conn->list_lock);
	}

	/* Fill tx AHS and rx buffer */
	if (data_len) {
		tx_sgl_task_params.sgl =
			       (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
		tx_sgl_task_params.sgl_phys_addr.lo =
					 (u32)(qedi_conn->gen_pdu.req_dma_addr);
		tx_sgl_task_params.sgl_phys_addr.hi =
			      (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
		tx_sgl_task_params.total_buffer_size = data_len;
		tx_sgl_task_params.num_sges = 1;

		rx_sgl_task_params.sgl =
			      (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
		rx_sgl_task_params.sgl_phys_addr.lo =
					(u32)(qedi_conn->gen_pdu.resp_dma_addr);
		rx_sgl_task_params.sgl_phys_addr.hi =
			     (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
		rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
		rx_sgl_task_params.num_sges = 1;
	}

	/* Fill fw input params */
	task_params.context = fw_task_ctx;
	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
	task_params.itid = tid;
	task_params.cq_rss_number = 0;
	task_params.tx_io_size = data_len;
	task_params.rx_io_size = resp_sge->sge_len;

	sq_idx = qedi_get_wqe_idx(qedi_conn);
	task_params.sqe = &ep->sq[sq_idx];

	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));

	rval = init_initiator_nop_out_task(&task_params,
					   &nop_out_pdu_header,
					   &tx_sgl_task_params,
					   &rx_sgl_task_params);
	/* NOTE(review): on failure the command may already be on
	 * active_cmd_list (driver-ping branch above) and the task id is
	 * not released here -- confirm the teardown path cleans this up.
	 */
	if (rval)
		return -1;

	qedi_ring_doorbell(qedi_conn);
	return 0;
}
  1482. static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
  1483. int bd_index)
  1484. {
  1485. struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
  1486. int frag_size, sg_frags;
  1487. sg_frags = 0;
  1488. while (sg_len) {
  1489. if (addr % QEDI_PAGE_SIZE)
  1490. frag_size =
  1491. (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE));
  1492. else
  1493. frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 :
  1494. (sg_len % QEDI_BD_SPLIT_SZ);
  1495. if (frag_size == 0)
  1496. frag_size = QEDI_BD_SPLIT_SZ;
  1497. bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff);
  1498. bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32);
  1499. bd[bd_index + sg_frags].sge_len = (u16)frag_size;
  1500. QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO,
  1501. "split sge %d: addr=%llx, len=%x",
  1502. (bd_index + sg_frags), addr, frag_size);
  1503. addr += (u64)frag_size;
  1504. sg_frags++;
  1505. sg_len -= frag_size;
  1506. }
  1507. return sg_frags;
  1508. }
/*
 * qedi_map_scsi_sg - DMA-map a command's scatterlist into its SGE table.
 * @qedi:	adapter context (supplies the PCI device used for mapping)
 * @cmd:	command whose io_tbl.sge_tbl is populated
 *
 * Maps the midlayer scatterlist, emits one SGE per mapped segment
 * (splitting segments larger than QEDI_BD_SPLIT_SZ via qedi_split_bd())
 * and flags commands whose segment boundaries are not page-aligned so
 * the caller uses the firmware slow path (cmd->use_slowpath).
 *
 * Return: number of SGE entries written.
 */
static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;
	struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int bd_count = 0;
	int sg_count;
	int sg_len;
	int sg_frags;
	u64 addr, end_addr;
	int i;

	WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD);

	/* NOTE(review): dma_map_sg() returns 0 on mapping failure; that
	 * case is not handled and would fall through to the byte-count
	 * WARN below -- confirm callers tolerate a 0 return.
	 */
	sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc),
			      scsi_sg_count(sc), sc->sc_data_direction);

	/*
	 * New condition to send single SGE as cached-SGL.
	 * Single SGE with length less than 64K.
	 */
	sg = scsi_sglist(sc);
	if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);

		bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
		bd[bd_count].sge_addr.hi = (addr >> 32);
		bd[bd_count].sge_len = (u16)sg_len;

		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
			  "single-cached-sgl: bd_count:%d addr=%llx, len=%x",
			  sg_count, addr, sg_len);

		return ++bd_count;
	}

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);
		end_addr = (addr + sg_len);

		/*
		 * first sg elem in the 'list',
		 * check if end addr is page-aligned.
		 */
		if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE))
			cmd->use_slowpath = true;

		/*
		 * last sg elem in the 'list',
		 * check if start addr is page-aligned.
		 */
		else if ((i == (sg_count - 1)) &&
			 (sg_count > 1) && (addr % QEDI_PAGE_SIZE))
			cmd->use_slowpath = true;

		/*
		 * middle sg elements in list,
		 * check if start and end addr is page-aligned
		 */
		else if ((i != 0) && (i != (sg_count - 1)) &&
			 ((addr % QEDI_PAGE_SIZE) ||
			  (end_addr % QEDI_PAGE_SIZE)))
			cmd->use_slowpath = true;

		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x",
			  i, sg_len);

		/* Oversized segments are split into multiple SGEs. */
		if (sg_len > QEDI_BD_SPLIT_SZ) {
			sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count);
		} else {
			sg_frags = 1;
			bd[bd_count].sge_addr.lo = addr & 0xffffffff;
			bd[bd_count].sge_addr.hi = addr >> 32;
			bd[bd_count].sge_len = sg_len;
		}
		byte_count += sg_len;
		bd_count += sg_frags;
	}

	/* Sanity check: mapped bytes must cover the whole request. */
	if (byte_count != scsi_bufflen(sc))
		QEDI_ERR(&qedi->dbg_ctx,
			 "byte_count = %d != scsi_bufflen = %d\n", byte_count,
			 scsi_bufflen(sc));
	else
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n",
			  byte_count);

	WARN_ON(byte_count != scsi_bufflen(sc));

	return bd_count;
}
  1588. static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
  1589. {
  1590. int bd_count;
  1591. struct scsi_cmnd *sc = cmd->scsi_cmd;
  1592. if (scsi_sg_count(sc)) {
  1593. bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd);
  1594. if (bd_count == 0)
  1595. return;
  1596. } else {
  1597. struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
  1598. bd[0].sge_addr.lo = 0;
  1599. bd[0].sge_addr.hi = 0;
  1600. bd[0].sge_len = 0;
  1601. bd_count = 0;
  1602. }
  1603. cmd->io_tbl.sge_valid = bd_count;
  1604. }
  1605. static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp)
  1606. {
  1607. u32 dword;
  1608. int lpcnt;
  1609. u8 *srcp;
  1610. lpcnt = sc->cmd_len / sizeof(dword);
  1611. srcp = (u8 *)sc->cmnd;
  1612. while (lpcnt--) {
  1613. memcpy(&dword, (const void *)srcp, 4);
  1614. *dstp = cpu_to_be32(dword);
  1615. srcp += 4;
  1616. dstp++;
  1617. }
  1618. if (sc->cmd_len & 0x3) {
  1619. dword = (u32)srcp[0] | ((u32)srcp[1] << 8);
  1620. *dstp = cpu_to_be32(dword);
  1621. }
  1622. }
  1623. void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
  1624. u16 tid, int8_t direction)
  1625. {
  1626. struct qedi_io_log *io_log;
  1627. struct iscsi_conn *conn = task->conn;
  1628. struct qedi_conn *qedi_conn = conn->dd_data;
  1629. struct scsi_cmnd *sc_cmd = task->sc;
  1630. unsigned long flags;
  1631. spin_lock_irqsave(&qedi->io_trace_lock, flags);
  1632. io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
  1633. io_log->direction = direction;
  1634. io_log->task_id = tid;
  1635. io_log->cid = qedi_conn->iscsi_conn_id;
  1636. io_log->lun = sc_cmd->device->lun;
  1637. io_log->op = sc_cmd->cmnd[0];
  1638. io_log->lba[0] = sc_cmd->cmnd[2];
  1639. io_log->lba[1] = sc_cmd->cmnd[3];
  1640. io_log->lba[2] = sc_cmd->cmnd[4];
  1641. io_log->lba[3] = sc_cmd->cmnd[5];
  1642. io_log->bufflen = scsi_bufflen(sc_cmd);
  1643. io_log->sg_count = scsi_sg_count(sc_cmd);
  1644. io_log->fast_sgs = qedi->fast_sgls;
  1645. io_log->cached_sgs = qedi->cached_sgls;
  1646. io_log->slow_sgs = qedi->slow_sgls;
  1647. io_log->cached_sge = qedi->use_cached_sge;
  1648. io_log->slow_sge = qedi->use_slow_sge;
  1649. io_log->fast_sge = qedi->use_fast_sge;
  1650. io_log->result = sc_cmd->result;
  1651. io_log->jiffies = jiffies;
  1652. io_log->blk_req_cpu = smp_processor_id();
  1653. if (direction == QEDI_IO_TRACE_REQ) {
  1654. /* For requests we only care about the submission CPU */
  1655. io_log->req_cpu = smp_processor_id() % qedi->num_queues;
  1656. io_log->intr_cpu = 0;
  1657. io_log->blk_rsp_cpu = 0;
  1658. } else if (direction == QEDI_IO_TRACE_RSP) {
  1659. io_log->req_cpu = smp_processor_id() % qedi->num_queues;
  1660. io_log->intr_cpu = qedi->intr_cpu;
  1661. io_log->blk_rsp_cpu = smp_processor_id();
  1662. }
  1663. qedi->io_trace_idx++;
  1664. if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
  1665. qedi->io_trace_idx = 0;
  1666. qedi->use_cached_sge = false;
  1667. qedi->use_slow_sge = false;
  1668. qedi->use_fast_sge = false;
  1669. spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
  1670. }
  1671. int qedi_iscsi_send_ioreq(struct iscsi_task *task)
  1672. {
  1673. struct iscsi_conn *conn = task->conn;
  1674. struct iscsi_session *session = conn->session;
  1675. struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
  1676. struct qedi_ctx *qedi = iscsi_host_priv(shost);
  1677. struct qedi_conn *qedi_conn = conn->dd_data;
  1678. struct qedi_cmd *cmd = task->dd_data;
  1679. struct scsi_cmnd *sc = task->sc;
  1680. struct iscsi_cmd_hdr cmd_pdu_header;
  1681. struct scsi_sgl_task_params tx_sgl_task_params;
  1682. struct scsi_sgl_task_params rx_sgl_task_params;
  1683. struct scsi_sgl_task_params *prx_sgl = NULL;
  1684. struct scsi_sgl_task_params *ptx_sgl = NULL;
  1685. struct iscsi_task_params task_params;
  1686. struct iscsi_conn_params conn_params;
  1687. struct scsi_initiator_cmd_params cmd_params;
  1688. struct iscsi_task_context *fw_task_ctx;
  1689. struct iscsi_cls_conn *cls_conn;
  1690. struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
  1691. enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
  1692. struct qedi_endpoint *ep;
  1693. u32 scsi_lun[2];
  1694. s16 tid = 0;
  1695. u16 sq_idx = 0;
  1696. u16 cq_idx;
  1697. int rval = 0;
  1698. ep = qedi_conn->ep;
  1699. cls_conn = qedi_conn->cls_conn;
  1700. conn = cls_conn->dd_data;
  1701. qedi_iscsi_map_sg_list(cmd);
  1702. int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun);
  1703. tid = qedi_get_task_idx(qedi);
  1704. if (tid == -1)
  1705. return -ENOMEM;
  1706. fw_task_ctx =
  1707. (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
  1708. tid);
  1709. memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
  1710. cmd->task_id = tid;
  1711. memset(&task_params, 0, sizeof(task_params));
  1712. memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header));
  1713. memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
  1714. memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
  1715. memset(&conn_params, 0, sizeof(conn_params));
  1716. memset(&cmd_params, 0, sizeof(cmd_params));
  1717. cq_idx = smp_processor_id() % qedi->num_queues;
  1718. /* Update header info */
  1719. SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
  1720. ISCSI_ATTR_SIMPLE);
  1721. if (hdr->cdb[0] != TEST_UNIT_READY) {
  1722. if (sc->sc_data_direction == DMA_TO_DEVICE) {
  1723. SET_FIELD(cmd_pdu_header.flags_attr,
  1724. ISCSI_CMD_HDR_WRITE, 1);
  1725. task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
  1726. } else {
  1727. SET_FIELD(cmd_pdu_header.flags_attr,
  1728. ISCSI_CMD_HDR_READ, 1);
  1729. task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
  1730. }
  1731. }
  1732. cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
  1733. cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
  1734. qedi_update_itt_map(qedi, tid, task->itt, cmd);
  1735. cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
  1736. cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
  1737. cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
  1738. cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
  1739. cmd_pdu_header.hdr_first_byte = hdr->opcode;
  1740. qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);
  1741. /* Fill tx AHS and rx buffer */
  1742. if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
  1743. tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
  1744. tx_sgl_task_params.sgl_phys_addr.lo =
  1745. (u32)(cmd->io_tbl.sge_tbl_dma);
  1746. tx_sgl_task_params.sgl_phys_addr.hi =
  1747. (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
  1748. tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
  1749. tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
  1750. if (cmd->use_slowpath)
  1751. tx_sgl_task_params.small_mid_sge = true;
  1752. } else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
  1753. rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
  1754. rx_sgl_task_params.sgl_phys_addr.lo =
  1755. (u32)(cmd->io_tbl.sge_tbl_dma);
  1756. rx_sgl_task_params.sgl_phys_addr.hi =
  1757. (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
  1758. rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
  1759. rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
  1760. }
  1761. /* Add conn param */
  1762. conn_params.first_burst_length = conn->session->first_burst;
  1763. conn_params.max_send_pdu_length = conn->max_xmit_dlength;
  1764. conn_params.max_burst_length = conn->session->max_burst;
  1765. if (conn->session->initial_r2t_en)
  1766. conn_params.initial_r2t = true;
  1767. if (conn->session->imm_data_en)
  1768. conn_params.immediate_data = true;
  1769. /* Add cmd params */
  1770. cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma;
  1771. cmd_params.sense_data_buffer_phys_addr.hi =
  1772. (u32)((u64)cmd->sense_buffer_dma >> 32);
  1773. /* Fill fw input params */
  1774. task_params.context = fw_task_ctx;
  1775. task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
  1776. task_params.itid = tid;
  1777. task_params.cq_rss_number = cq_idx;
  1778. if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE)
  1779. task_params.tx_io_size = scsi_bufflen(sc);
  1780. else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ)
  1781. task_params.rx_io_size = scsi_bufflen(sc);
  1782. sq_idx = qedi_get_wqe_idx(qedi_conn);
  1783. task_params.sqe = &ep->sq[sq_idx];
  1784. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
  1785. "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n",
  1786. (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
  1787. "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
  1788. "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
  1789. (u16)cmd->io_tbl.sge_valid, scsi_bufflen(sc),
  1790. (u32)(cmd->io_tbl.sge_tbl_dma),
  1791. (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
  1792. memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
  1793. if (task_params.tx_io_size != 0)
  1794. ptx_sgl = &tx_sgl_task_params;
  1795. if (task_params.rx_io_size != 0)
  1796. prx_sgl = &rx_sgl_task_params;
  1797. rval = init_initiator_rw_iscsi_task(&task_params, &conn_params,
  1798. &cmd_params, &cmd_pdu_header,
  1799. ptx_sgl, prx_sgl,
  1800. NULL);
  1801. if (rval)
  1802. return -1;
  1803. spin_lock(&qedi_conn->list_lock);
  1804. list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
  1805. cmd->io_cmd_in_list = true;
  1806. qedi_conn->active_cmd_count++;
  1807. spin_unlock(&qedi_conn->list_lock);
  1808. qedi_ring_doorbell(qedi_conn);
  1809. return 0;
  1810. }
  1811. int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
  1812. {
  1813. struct iscsi_task_params task_params;
  1814. struct qedi_endpoint *ep;
  1815. struct iscsi_conn *conn = task->conn;
  1816. struct qedi_conn *qedi_conn = conn->dd_data;
  1817. struct qedi_cmd *cmd = task->dd_data;
  1818. u16 sq_idx = 0;
  1819. int rval = 0;
  1820. QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
  1821. "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
  1822. cmd->task_id, get_itt(task->itt), task->state,
  1823. cmd->state, qedi_conn->iscsi_conn_id);
  1824. memset(&task_params, 0, sizeof(task_params));
  1825. ep = qedi_conn->ep;
  1826. sq_idx = qedi_get_wqe_idx(qedi_conn);
  1827. task_params.sqe = &ep->sq[sq_idx];
  1828. memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
  1829. task_params.itid = cmd->task_id;
  1830. rval = init_cleanup_task(&task_params);
  1831. if (rval)
  1832. return rval;
  1833. qedi_ring_doorbell(qedi_conn);
  1834. return 0;
  1835. }