// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

#include <scsi/scsi_tcq.h>
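
/**
 * qla4xxx_space_in_req_ring - checks for adequate space in request ring
 * @ha: Pointer to host adapter structure.
 * @req_cnt: Number of request entries needed.
 *
 * Refreshes the cached free-entry count from the shadow out pointer when
 * running low, keeping a two-entry cushion so the ring never fills
 * completely. Returns 1 if there is room for @req_cnt entries, else 0.
 **/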
static int
qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
{
	uint16_t cnt;

	/* Calculate number of free request entries. */
	if ((req_cnt + 2) >= ha->req_q_count) {
		cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
		if (ha->request_in < cnt)
			ha->req_q_count = cnt - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
					  (ha->request_in - cnt);
	}

	/* Check if room for request in request ring. */
	if ((req_cnt + 2) < ha->req_q_count)
		return 1;
	else
		return 0;
}
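
/**
 * qla4xxx_advance_req_ring_ptr - advances the request ring pointer
 * @ha: Pointer to host adapter structure.
 *
 * Moves request_in and request_ptr to the next entry, wrapping back to
 * the start of the ring at REQUEST_QUEUE_DEPTH.
 **/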
static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
{
	/* Advance request queue pointer */
	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		ha->request_in++;
		ha->request_ptr++;
	}
}

/**
 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
 * @ha: Pointer to host adapter structure.
 * @queue_entry: Pointer to pointer to queue entry structure
 *
 * This routine performs the following tasks:
 *	- returns the current request_in pointer (if queue not full)
 *	- advances the request_in pointer
 *	- checks for queue full
 **/
static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
			       struct queue_entry **queue_entry)
{
	uint16_t req_cnt = 1;

	if (qla4xxx_space_in_req_ring(ha, req_cnt)) {
		*queue_entry = ha->request_ptr;
		memset(*queue_entry, 0, sizeof(**queue_entry));

		qla4xxx_advance_req_ring_ptr(ha);
		ha->req_q_count -= req_cnt;
		return QLA_SUCCESS;
	}

	return QLA_ERROR;
}

/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 * @mrkr_mod: marker identifier
 *
 * This routine issues a marker IOCB.
 **/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
			     struct ddb_entry *ddb_entry, uint64_t lun,
			     uint16_t mrkr_mod)
{
	struct qla4_marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(mrkr_mod);
	int_to_scsilun(lun, &marker_entry->lun);
	wmb();

	/* Tell ISP it's got a new I/O request */
	ha->isp_ops->queue_iocb(ha);

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}
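
/**
 * qla4xxx_alloc_cont_entry - allocates a continuation IOCB from the ring
 * @ha: Pointer to host adapter structure.
 *
 * Claims the entry at the current request_ptr, advances the ring, and
 * fills in the continuation-type header before returning the entry.
 **/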
static struct continuation_t1_entry *
qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
{
	struct continuation_t1_entry *cont_entry;

	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;

	qla4xxx_advance_req_ring_ptr(ha);

	/* Load packet defaults */
	cont_entry->hdr.entryType = ET_CONTINUE;
	cont_entry->hdr.entryCount = 1;
	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);

	return cont_entry;
}
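
/**
 * qla4xxx_calc_request_entries - calculates IOCBs needed for a command
 * @dsds: Number of data segment descriptors to be sent.
 *
 * One command IOCB carries up to COMMAND_SEG descriptors; each additional
 * continuation IOCB carries up to CONTINUE_SEG more. For example,
 * dsds = COMMAND_SEG + 1 requires two entries: the command IOCB plus one
 * continuation IOCB for the single leftover descriptor. Returns the total
 * number of request-queue entries required.
 **/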
static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > COMMAND_SEG) {
		iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
		if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
			iocbs++;
	}
	return iocbs;
}
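
/**
 * qla4xxx_build_scsi_iocbs - builds the data segment descriptors
 * @srb: Pointer to SCSI Request Block.
 * @cmd_entry: Pointer to the command IOCB being built.
 * @tot_dsds: Total number of data segments to map.
 *
 * Walks the scatter-gather list, filling 64-bit address/length pairs into
 * the command IOCB and allocating continuation IOCBs from the ring as
 * each entry's descriptor slots are exhausted.
 **/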
static void qla4xxx_build_scsi_iocbs(struct srb *srb,
				     struct command_t3_entry *cmd_entry,
				     uint16_t tot_dsds)
{
	struct scsi_qla_host *ha;
	uint16_t avail_dsds;
	struct data_seg_a64 *cur_dsd;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = srb->cmd;
	ha = srb->ha;

	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		/* No data being transferred */
		cmd_entry->ttlByteCnt = cpu_to_le32(0);
		return;
	}

	avail_dsds = COMMAND_SEG;
	cur_dsd = (struct data_seg_a64 *) &(cmd_entry->dataseg[0]);

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			struct continuation_t1_entry *cont_entry;

			cont_entry = qla4xxx_alloc_cont_entry(ha);
			cur_dsd = (struct data_seg_a64 *)
				  &cont_entry->dataseg[0];
			avail_dsds = CONTINUE_SEG;
		}

		sle_dma = sg_dma_address(sg);
		cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
		cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		cur_dsd++;
	}
}
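
/**
 * qla4_83xx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/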
void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
{
	writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
	readl(&ha->qla4_83xx_reg->req_q_in);
}
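
/**
 * qla4_83xx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 **/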
void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
	readl(&ha->qla4_83xx_reg->rsp_q_out);
}

/**
 * qla4_82xx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/
void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
{
	uint32_t dbval = 0;

	dbval = 0x14 | (ha->func_num << 5);
	dbval = dbval | (0 << 8) | (ha->request_in << 16);

	qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
}

/**
 * qla4_82xx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
	readl(&ha->qla4_82xx_reg->rsp_q_out);
}

/**
 * qla4xxx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/
void qla4xxx_queue_iocb(struct scsi_qla_host *ha)
{
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);
}

/**
 * qla4xxx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4xxx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);
}

/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	int nseg;
	uint16_t tot_dsds;
	uint16_t req_cnt;
	unsigned long flags;
	uint32_t index;

	/* Get real lun and adapter */
	ddb_entry = srb->ddb;

	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	index = scsi_cmd_to_rq(cmd)->tag;

	/*
	 * Check to see if adapter is online before placing request on
	 * request queue.  If a reset occurs and a request is in the queue,
	 * the firmware will still attempt to process the request, retrieving
	 * garbage for pointers.
	 */
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
			      "Do not issue command.\n",
			      ha->host_no, __func__));
		goto queuing_error;
	}

	/* Calculate the number of request entries needed. */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		goto queuing_error;
	tot_dsds = nseg;

	req_cnt = qla4xxx_calc_request_entries(tot_dsds);
	if (!qla4xxx_space_in_req_ring(ha, req_cnt))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;

	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits iff there is data to be
	 *	 transferred, as the data direction bit is sometimes filled
	 *	 in when there is no data to be transferred */
	cmd_entry->control_flags = CF_NO_DATA;
	if (scsi_bufflen(cmd)) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;

		ha->bytes_xfered += scsi_bufflen(cmd);
		if (ha->bytes_xfered & ~0xFFFFF) {
			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
			ha->bytes_xfered &= 0xFFFFF;
		}
	}

	/* Set tagged queueing control flags */
	cmd_entry->control_flags |= CF_SIMPLE_TAG;

	qla4xxx_advance_req_ring_ptr(ha);
	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
	wmb();

	srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;

	/* update counters */
	srb->state = SRB_ACTIVE_STATE;
	srb->flags |= SRB_DMA_VALID;

	/* Track IOCB used */
	ha->iocb_cnt += req_cnt;
	srb->iocb_cnt = req_cnt;
	ha->req_q_count -= req_cnt;

	ha->isp_ops->queue_iocb(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_ERROR;
}
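
/**
 * qla4xxx_send_passthru0 - issues a PASSTHRU0 IOCB for an iSCSI PDU
 * @task: iSCSI task to be sent to the firmware.
 *
 * Builds a passthrough IOCB that hands the raw iSCSI PDU (and optional
 * response buffer) to the firmware and rings the request-queue doorbell.
 **/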
int qla4xxx_send_passthru0(struct iscsi_task *task)
{
	struct passthru0 *passthru_iocb;
	struct iscsi_session *sess = task->conn->session;
	struct ddb_entry *ddb_entry = sess->dd_data;
	struct scsi_qla_host *ha = ddb_entry->ha;
	struct ql4_task_data *task_data = task->dd_data;
	uint16_t ctrl_flags = 0;
	unsigned long flags;
	int ret = QLA_ERROR;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	task_data->iocb_req_cnt = 1;
	/* Put the IOCB on the request queue */
	if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt))
		goto queuing_error;

	passthru_iocb = (struct passthru0 *) ha->request_ptr;

	memset(passthru_iocb, 0, sizeof(struct passthru0));
	passthru_iocb->hdr.entryType = ET_PASSTHRU0;
	passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU;
	passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt;
	passthru_iocb->handle = task->itt;
	passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT);

	/* Setup the out & in DSDs */
	if (task_data->req_len) {
		memcpy((uint8_t *)task_data->req_buffer +
		       sizeof(struct iscsi_hdr), task->data, task->data_count);
		ctrl_flags |= PT_FLAG_SEND_BUFFER;
		passthru_iocb->out_dsd.base.addrLow =
					cpu_to_le32(LSDW(task_data->req_dma));
		passthru_iocb->out_dsd.base.addrHigh =
					cpu_to_le32(MSDW(task_data->req_dma));
		passthru_iocb->out_dsd.count =
					cpu_to_le32(task->data_count +
						    sizeof(struct iscsi_hdr));
	}

	if (task_data->resp_len) {
		passthru_iocb->in_dsd.base.addrLow =
					cpu_to_le32(LSDW(task_data->resp_dma));
		passthru_iocb->in_dsd.base.addrHigh =
					cpu_to_le32(MSDW(task_data->resp_dma));
		passthru_iocb->in_dsd.count =
					cpu_to_le32(task_data->resp_len);
	}

	ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE);
	passthru_iocb->control_flags = cpu_to_le16(ctrl_flags);

	/* Update the request pointer */
	qla4xxx_advance_req_ring_ptr(ha);
	wmb();

	/* Track IOCB used */
	ha->iocb_cnt += task_data->iocb_req_cnt;
	ha->req_q_count -= task_data->iocb_req_cnt;
	ha->isp_ops->queue_iocb(ha);
	ret = QLA_SUCCESS;

queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return ret;
}
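
/**
 * qla4xxx_get_new_mrb - allocates a mailbox request block
 * @ha: Pointer to host adapter structure.
 *
 * Returns a zeroed mrb bound to @ha, or NULL on allocation failure.
 **/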
static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
{
	struct mrb *mrb;

	mrb = kzalloc(sizeof(*mrb), GFP_KERNEL);
	if (!mrb)
		return NULL;

	mrb->ha = ha;
	return mrb;
}
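
/**
 * qla4xxx_send_mbox_iocb - issues a mailbox IOCB to the firmware
 * @ha: Pointer to host adapter structure.
 * @mrb: Pointer to the mailbox request block tracking the command.
 * @in_mbox: Array of eight mailbox register values to send.
 *
 * Places a mailbox-command IOCB on the request queue, records @mrb in
 * the active mrb array under a free index, and rings the doorbell.
 **/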
static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
				  uint32_t *in_mbox)
{
	int rval = QLA_SUCCESS;
	uint32_t i;
	unsigned long flags;
	uint32_t index = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the mailbox IOCB */
	rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
	if (rval != QLA_SUCCESS)
		goto exit_mbox_iocb;

	index = ha->mrb_index;
	/* get valid mrb index */
	for (i = 0; i < MAX_MRB; i++) {
		index++;
		if (index == MAX_MRB)
			index = 1;
		if (ha->active_mrb_array[index] == NULL) {
			ha->mrb_index = index;
			break;
		}
	}

	mrb->iocb_cnt = 1;
	ha->active_mrb_array[index] = mrb;
	mrb->mbox->handle = index;
	mrb->mbox->hdr.entryType = ET_MBOX_CMD;
	mrb->mbox->hdr.entryCount = mrb->iocb_cnt;
	memcpy(mrb->mbox->in_mbox, in_mbox, 32);
	mrb->mbox_cmd = in_mbox[0];
	wmb();

	ha->iocb_cnt += mrb->iocb_cnt;
	ha->isp_ops->queue_iocb(ha);

exit_mbox_iocb:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
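
/**
 * qla4xxx_ping_iocb - issues a ping via a mailbox IOCB
 * @ha: Pointer to host adapter structure.
 * @options: Ping options passed through in the second mailbox register.
 * @payload_size: Size of the ping payload.
 * @pid: Process ID to associate with this ping request.
 * @ipaddr: 16-byte destination IP address (IPv4 or IPv6).
 *
 * Builds the MBOX_CMD_PING register set and sends it with a mailbox IOCB.
 * On failure the mrb is freed here; on success it stays tracked in the
 * active mrb array until the firmware completes the command.
 **/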
int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
		      uint32_t payload_size, uint32_t pid, uint8_t *ipaddr)
{
	uint32_t in_mbox[8];
	struct mrb *mrb = NULL;
	int rval = QLA_SUCCESS;

	memset(in_mbox, 0, sizeof(in_mbox));

	mrb = qla4xxx_get_new_mrb(ha);
	if (!mrb) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: fail to get new mrb\n",
				  __func__));
		rval = QLA_ERROR;
		goto exit_ping;
	}

	in_mbox[0] = MBOX_CMD_PING;
	in_mbox[1] = options;
	memcpy(&in_mbox[2], &ipaddr[0], 4);
	memcpy(&in_mbox[3], &ipaddr[4], 4);
	memcpy(&in_mbox[4], &ipaddr[8], 4);
	memcpy(&in_mbox[5], &ipaddr[12], 4);
	in_mbox[6] = payload_size;

	mrb->pid = pid;
	rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);
	if (rval != QLA_SUCCESS)
		goto exit_ping;

	return rval;

exit_ping:
	kfree(mrb);
	return rval;
}