
// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2011-2013 QLogic Corporation
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_bsg.h"
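
/**
 * qla4xxx_read_flash - handle QLISCSI_VND_READ_FLASH vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Reads reply_payload.payload_len bytes of flash starting at the offset in
 * vendor_cmd[1] into a DMA-coherent buffer and copies the result back to
 * the reply payload scatterlist.
 **/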
static int
qla4xxx_read_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t offset = 0;
	uint32_t length = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation active\n",
			   __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_READING;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	length = bsg_job->reply_payload.payload_len;

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash data\n",
			   __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    flash, length);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}
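
/**
 * qla4xxx_update_flash - handle QLISCSI_VND_UPDATE_FLASH vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Copies the request payload into a DMA-coherent buffer and writes it to
 * flash at the offset in vendor_cmd[1], with write options in vendor_cmd[2].
 **/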
static int
qla4xxx_update_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t length = 0;
	uint32_t offset = 0;
	uint32_t options = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation active\n",
			   __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_WRITING;
	length = bsg_job->request_payload.payload_len;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash data\n",
			   __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, flash, length);

	rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}
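
/**
 * qla4xxx_get_acb_state - handle QLISCSI_VND_GET_ACB_STATE vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Queries the IP state for the ACB/IP indices in vendor_cmd[1] and
 * vendor_cmd[2] and returns the raw mailbox status registers in the reply
 * payload.
 **/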
static int
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t status[MBOX_REG_COUNT];
	uint32_t acb_idx;
	uint32_t ip_idx;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (bsg_job->reply_payload.payload_len < sizeof(status)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
			   __func__, bsg_job->reply_payload.payload_len);
		rval = -EINVAL;
		goto leave;
	}

	acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    status, sizeof(status));
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}
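
/**
 * qla4xxx_read_nvram - handle QLISCSI_VND_READ_NVRAM vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Reads NVRAM from the offset in vendor_cmd[1] into the reply payload,
 * bounds-checked against the per-chip NVRAM size. Only 40xx
 * (4010/4022/4032) adapters are supported.
 **/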
static int
qla4xxx_read_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram data\n",
			   __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    nvram, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}
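
/**
 * qla4xxx_update_nvram - handle QLISCSI_VND_UPDATE_NVRAM vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Writes the request payload to NVRAM at the offset in vendor_cmd[1],
 * bounds-checked against the per-chip NVRAM size.
 **/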
static int
qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->request_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram data\n",
			   __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, nvram, len);

	rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}
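
/**
 * qla4xxx_restore_defaults - handle QLISCSI_VND_RESTORE_DEFAULTS request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Restores factory defaults for the region and fields passed in
 * vendor_cmd[1..3]. Not supported on 4010 adapters.
 **/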
static int
qla4xxx_restore_defaults(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t region = 0;
	uint32_t field0 = 0;
	uint32_t field1 = 0;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
	field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];

	rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: restore factory defaults failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}
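
/**
 * qla4xxx_bsg_get_acb - handle QLISCSI_VND_GET_ACB vendor request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Fetches the address control block of the type in vendor_cmd[1] into the
 * reply payload, which must be at least sizeof(struct addr_ctrl_blk).
 **/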
static int
qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t acb_type = 0;
	uint32_t len = 0;
	dma_addr_t acb_dma;
	uint8_t *acb = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	if (len < sizeof(struct addr_ctrl_blk)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
			   __func__, len);
		rval = -EINVAL;
		goto leave;
	}

	acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb data\n",
			   __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    acb, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
leave:
	return rval;
}
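
/**
 * ql4xxx_execute_diag_cmd - execute a diagnostic mailbox command
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Issues the mailbox command passed in vendor_cmd[1..] and appends the
 * returned mailbox status registers to the bsg reply for the application.
 **/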
static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint8_t *rsp_ptr = NULL;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_diag_mem_test;
	}

	bsg_reply->reply_payload_rcv_len = 0;
	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
	       sizeof(uint32_t) * MBOX_REG_COUNT);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
			  mbox_cmd[7]));

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
					 &mbox_sts[0]);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
			  mbox_sts[7]));

	if (status == QLA_SUCCESS)
		bsg_reply->result = DID_OK << 16;
	else
		bsg_reply->result = DID_ERROR << 16;

	/* Send mbox_sts to application */
	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));

exit_diag_mem_test:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: bsg_reply->result = x%x, status = %s\n",
			  __func__, bsg_reply->result, STATUS(status)));

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}
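
/**
 * qla4_83xx_wait_for_loopback_config_comp - wait for loopback IDC events
 * @ha: pointer to adapter structure
 * @wait_for_link: also wait for a LINK UP notification when non-zero
 *
 * Waits up to IDC_COMP_TOV seconds (optionally extended by idc_extend_tmo)
 * for the IDC Complete notification, then optionally for LINK UP.
 **/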
static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
						   int wait_for_link)
{
	int status = QLA_SUCCESS;

	if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) {
		ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, waiting another %d seconds\n",
			   __func__, ha->idc_extend_tmo);
		if (ha->idc_extend_tmo) {
			if (!wait_for_completion_timeout(&ha->idc_comp,
						(ha->idc_extend_tmo * HZ))) {
				ha->notify_idc_comp = 0;
				ha->notify_link_up_comp = 0;
				ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received\n",
					   __func__);
				status = QLA_ERROR;
				goto exit_wait;
			} else {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: IDC Complete notification received\n",
						  __func__));
			}
		}
	} else {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: IDC Complete notification received\n",
				  __func__));
	}
	ha->notify_idc_comp = 0;

	if (wait_for_link) {
		if (!wait_for_completion_timeout(&ha->link_up_comp,
						 (IDC_COMP_TOV * HZ))) {
			ha->notify_link_up_comp = 0;
			ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received\n",
				   __func__);
			status = QLA_ERROR;
			goto exit_wait;
		} else {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: LINK UP notification received\n",
					  __func__));
		}
		ha->notify_link_up_comp = 0;
	}

exit_wait:
	return status;
}
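
/**
 * qla4_83xx_pre_loopback_config - enable loopback before the diag test
 * @ha: pointer to adapter structure
 * @mbox_cmd: diag mailbox command; mbox_cmd[1] selects int/ext loopback
 *
 * Sets the requested loopback bit and clears DCBX in the port config, and
 * arms the IDC Complete and LINK UP completion notifications.
 **/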
static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha,
					 uint32_t *mbox_cmd)
{
	uint32_t config = 0;
	int status = QLA_SUCCESS;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		goto exit_pre_loopback_config;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n",
			  __func__, config));

	if ((config & ENABLE_INTERNAL_LOOPBACK) ||
	    (config & ENABLE_EXTERNAL_LOOPBACK)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid request\n",
			   __func__);
		goto exit_pre_loopback_config;
	}

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		config |= ENABLE_INTERNAL_LOOPBACK;

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		config |= ENABLE_EXTERNAL_LOOPBACK;

	config &= ~ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n",
			  __func__, config));

	ha->notify_idc_comp = 1;
	ha->notify_link_up_comp = 1;

	/* get the link state */
	qla4xxx_get_firmware_state(ha);

	status = qla4_83xx_set_port_config(ha, &config);
	if (status != QLA_SUCCESS) {
		ha->notify_idc_comp = 0;
		ha->notify_link_up_comp = 0;
		goto exit_pre_loopback_config;
	}

exit_pre_loopback_config:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(status)));
	return status;
}
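
/**
 * qla4_83xx_post_loopback_config - restore port config after the diag test
 * @ha: pointer to adapter structure
 * @mbox_cmd: diag mailbox command; mbox_cmd[1] selects int/ext loopback
 *
 * Clears the loopback bit, re-enables DCBX, and schedules an adapter reset
 * if the restored config cannot be applied.
 **/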
static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha,
					  uint32_t *mbox_cmd)
{
	int status = QLA_SUCCESS;
	uint32_t config = 0;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		goto exit_post_loopback_config;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__,
			  config));

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		config &= ~ENABLE_INTERNAL_LOOPBACK;
	else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		config &= ~ENABLE_EXTERNAL_LOOPBACK;

	config |= ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: Restore default port config=%08X\n", __func__,
			  config));

	ha->notify_idc_comp = 1;
	if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP)
		ha->notify_link_up_comp = 1;

	status = qla4_83xx_set_port_config(ha, &config);
	if (status != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n",
			   __func__);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		clear_bit(AF_LOOPBACK, &ha->flags);
		goto exit_post_loopback_config;
	}

exit_post_loopback_config:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(status)));
	return status;
}
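
/**
 * qla4xxx_execute_diag_loopback_cmd - run an internal/external loopback test
 * @bsg_job: iscsi_bsg_job to handle
 *
 * On 8032/8042 adapters, brackets the loopback mailbox command with
 * pre/post port reconfiguration and the matching IDC completion waits.
 **/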
static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint8_t *rsp_ptr = NULL;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int wait_for_link = 1;
	int status = QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	bsg_reply->reply_payload_rcv_len = 0;

	if (test_bit(AF_LOOPBACK, &ha->flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
	       sizeof(uint32_t) * MBOX_REG_COUNT);

	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_pre_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_TIME_OUT << 16;
			goto restore;
		}
	}

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
			  mbox_cmd[7]));

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
					 &mbox_sts[0]);

	if (status == QLA_SUCCESS)
		bsg_reply->result = DID_OK << 16;
	else
		bsg_reply->result = DID_ERROR << 16;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
			  mbox_sts[7]));

	/* Send mbox_sts to application */
	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));

restore:
	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_post_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		/* After post_loopback_config(), wait for LINK UP only
		 * if the PHY link is up */
		if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP))
			wait_for_link = 0;

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_TIME_OUT << 16;
			goto exit_loopback_cmd;
		}
	}

exit_loopback_cmd:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: bsg_reply->result = x%x, status = %s\n",
			  __func__, bsg_reply->result, STATUS(status)));

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}
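
/**
 * qla4xxx_execute_diag_test - dispatch QLISCSI_VND_DIAG_TEST requests
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Routes memory/flash/NVRAM self-tests and LED config commands to
 * ql4xxx_execute_diag_cmd() and loopback tests to the loopback handler.
 **/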
static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t diag_cmd;
	int rval = -EINVAL;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	if (diag_cmd == MBOX_CMD_DIAG_TEST) {
		switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) {
		case QL_DIAG_CMD_TEST_DDR_SIZE:
		case QL_DIAG_CMD_TEST_DDR_RW:
		case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW:
		case QL_DIAG_CMD_TEST_NVRAM:
		case QL_DIAG_CMD_TEST_FLASH_ROM:
		case QL_DIAG_CMD_TEST_DMA_XFER:
		case QL_DIAG_CMD_SELF_DDR_RW:
		case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW:
			/* Execute diag test for adapter RAM/FLASH */
			ql4xxx_execute_diag_cmd(bsg_job);
			/* Always return success as we want to send bsg_reply
			 * to Application */
			rval = QLA_SUCCESS;
			break;

		case QL_DIAG_CMD_TEST_INT_LOOPBACK:
		case QL_DIAG_CMD_TEST_EXT_LOOPBACK:
			/* Execute diag test for Network */
			qla4xxx_execute_diag_loopback_cmd(bsg_job);
			/* Always return success as we want to send bsg_reply
			 * to Application */
			rval = QLA_SUCCESS;
			break;
		default:
			ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n",
				   __func__,
				   bsg_req->rqst_data.h_vendor.vendor_cmd[2]);
		}
	} else if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) ||
		   (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) {
		ql4xxx_execute_diag_cmd(bsg_job);
		rval = QLA_SUCCESS;
	} else {
		ql4_printk(KERN_ERR, ha, "%s: Invalid diag cmd: 0x%x\n",
			   __func__, diag_cmd);
	}

	return rval;
}

/**
 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
 * @bsg_job: iscsi_bsg_job to handle
 **/
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
	case QLISCSI_VND_READ_FLASH:
		return qla4xxx_read_flash(bsg_job);

	case QLISCSI_VND_UPDATE_FLASH:
		return qla4xxx_update_flash(bsg_job);

	case QLISCSI_VND_GET_ACB_STATE:
		return qla4xxx_get_acb_state(bsg_job);

	case QLISCSI_VND_READ_NVRAM:
		return qla4xxx_read_nvram(bsg_job);

	case QLISCSI_VND_UPDATE_NVRAM:
		return qla4xxx_update_nvram(bsg_job);

	case QLISCSI_VND_RESTORE_DEFAULTS:
		return qla4xxx_restore_defaults(bsg_job);

	case QLISCSI_VND_GET_ACB:
		return qla4xxx_bsg_get_acb(bsg_job);

	case QLISCSI_VND_DIAG_TEST:
		return qla4xxx_execute_diag_test(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: 0x%x\n",
			   __func__, bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
		bsg_job_done(bsg_job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		return -ENOSYS;
	}
}

/**
 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
 * @bsg_job: iscsi_bsg_job to handle
 */
int qla4xxx_bsg_request(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->msgcode) {
	case ISCSI_BSG_HST_VENDOR:
		return qla4xxx_process_vendor_specific(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
			   __func__, bsg_req->msgcode);
	}
	return -ENOSYS;
}