snic_disc.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. // Copyright 2014 Cisco Systems, Inc. All rights reserved.
  3. #include <linux/errno.h>
  4. #include <linux/mempool.h>
  5. #include <scsi/scsi_tcq.h>
  6. #include "snic_disc.h"
  7. #include "snic.h"
  8. #include "snic_io.h"
/* snic target types: printable names indexed by enum snic_tgt_type. */
static const char * const snic_tgt_type_str[] = {
	[SNIC_TGT_DAS] = "DAS",
	[SNIC_TGT_SAN] = "SAN",
};
  14. static inline const char *
  15. snic_tgt_type_to_str(int typ)
  16. {
  17. return ((typ > SNIC_TGT_NONE && typ <= SNIC_TGT_SAN) ?
  18. snic_tgt_type_str[typ] : "Unknown");
  19. }
/* Printable names for the snic target states, indexed by state value. */
static const char * const snic_tgt_state_str[] = {
	[SNIC_TGT_STAT_INIT] = "INIT",
	[SNIC_TGT_STAT_ONLINE] = "ONLINE",
	[SNIC_TGT_STAT_OFFLINE] = "OFFLINE",
	[SNIC_TGT_STAT_DEL] = "DELETION IN PROGRESS",
};
  26. const char *
  27. snic_tgt_state_to_str(int state)
  28. {
  29. return ((state >= SNIC_TGT_STAT_INIT && state <= SNIC_TGT_STAT_DEL) ?
  30. snic_tgt_state_str[state] : "UNKNOWN");
  31. }
/*
 * snic_report_tgt_init : initialize a REPORT_TARGETS request descriptor.
 *
 * @req:        host request to fill in
 * @hid:        host id encoded into the request header
 * @buf:        response buffer VA (currently unused here; the SG entry is
 *              built from @rsp_buf_pa — kept for interface symmetry)
 * @len:        response buffer length in bytes
 * @rsp_buf_pa: DMA address of the response buffer
 * @ctx:        opaque context (the rqi pointer), returned in the completion
 */
static void
snic_report_tgt_init(struct snic_host_req *req, u32 hid, u8 *buf, u32 len,
		     dma_addr_t rsp_buf_pa, ulong ctx)
{
	struct snic_sg_desc *sgd = NULL;

	/* Untagged request (SCSI_NO_TAG) with a single SG element. */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_REPORT_TGTS, 0, SCSI_NO_TAG, hid,
			1, ctx);

	req->u.rpt_tgts.sg_cnt = cpu_to_le16(1);
	sgd = req_to_sgl(req);
	/* Single SG entry pointing at the DMA-mapped response buffer. */
	sgd[0].addr = cpu_to_le64(rsp_buf_pa);
	sgd[0].len = cpu_to_le32(len);
	sgd[0]._resvd = 0;
	req->u.rpt_tgts.sg_addr = cpu_to_le64((ulong)sgd);
}
/*
 * snic_queue_report_tgt_req : Queues report target request.
 *
 * Allocates and DMA-maps a response buffer sized for the maximum number of
 * targets, builds a REPORT_TARGETS request and posts it on the WQ.
 * Returns 0 on success; -ENOMEM, -EINVAL or the queueing error otherwise.
 * On success the buffer is owned by the completion path
 * (snic_report_tgt_cmpl_handler / snic_handle_tgt_disc), which frees it.
 */
static int
snic_queue_report_tgt_req(struct snic *snic)
{
	struct snic_req_info *rqi = NULL;
	u32 ntgts, buf_len = 0;
	u8 *buf = NULL;
	dma_addr_t pa = 0;
	int ret = 0;

	rqi = snic_req_init(snic, 1);
	if (!rqi) {
		ret = -ENOMEM;
		goto error;
	}

	/* Cap target count by the firmware limit when it is known. */
	if (snic->fwinfo.max_tgts)
		ntgts = min_t(u32, snic->fwinfo.max_tgts, snic->shost->max_id);
	else
		ntgts = snic->shost->max_id;

	/* Allocate Response Buffer */
	SNIC_BUG_ON(ntgts == 0);
	/* Extra SNIC_SG_DESC_ALIGN bytes give alignment slack for the HW. */
	buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;

	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		snic_req_free(snic, rqi);
		SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");
		ret = -ENOMEM;
		goto error;
	}

	SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);

	pa = dma_map_single(&snic->pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&snic->pdev->dev, pa)) {
		SNIC_HOST_ERR(snic->shost,
			      "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
			      buf);
		kfree(buf);
		snic_req_free(snic, rqi);
		ret = -EINVAL;
		goto error;
	}

	SNIC_BUG_ON(pa == 0);

	/* Stash the buffer VA so the completion path can free it. */
	rqi->sge_va = (ulong) buf;

	snic_report_tgt_init(rqi->req,
			     snic->config.hid,
			     buf,
			     buf_len,
			     pa,
			     (ulong)rqi);

	snic_handle_untagged_req(snic, rqi);

	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
	if (ret) {
		/* Posting failed: unmap + free the buffer, release the req. */
		dma_unmap_single(&snic->pdev->dev, pa, buf_len,
				 DMA_FROM_DEVICE);
		kfree(buf);
		rqi->sge_va = 0;
		snic_release_untagged_req(snic, rqi);
		SNIC_HOST_ERR(snic->shost, "Queuing Report Tgts Failed.\n");
		goto error;
	}

	SNIC_DISC_DBG(snic->shost, "Report Targets Issued.\n");

	return ret;

error:
	SNIC_HOST_ERR(snic->shost,
		      "Queuing Report Targets Failed, err = %d\n",
		      ret);

	return ret;
} /* end of snic_queue_report_tgt_req */
  117. /* call into SML */
  118. static void
  119. snic_scsi_scan_tgt(struct work_struct *work)
  120. {
  121. struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work);
  122. struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
  123. unsigned long flags;
  124. SNIC_HOST_INFO(shost, "Scanning Target id 0x%x\n", tgt->id);
  125. scsi_scan_target(&tgt->dev,
  126. tgt->channel,
  127. tgt->scsi_tgt_id,
  128. SCAN_WILD_CARD,
  129. SCSI_SCAN_RESCAN);
  130. spin_lock_irqsave(shost->host_lock, flags);
  131. tgt->flags &= ~SNIC_TGT_SCAN_PENDING;
  132. spin_unlock_irqrestore(shost->host_lock, flags);
  133. } /* end of snic_scsi_scan_tgt */
  134. /*
  135. * snic_tgt_lookup :
  136. */
  137. static struct snic_tgt *
  138. snic_tgt_lookup(struct snic *snic, struct snic_tgt_id *tgtid)
  139. {
  140. struct list_head *cur, *nxt;
  141. struct snic_tgt *tgt = NULL;
  142. list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
  143. tgt = list_entry(cur, struct snic_tgt, list);
  144. if (tgt->id == le32_to_cpu(tgtid->tgt_id))
  145. return tgt;
  146. tgt = NULL;
  147. }
  148. return tgt;
  149. } /* end of snic_tgt_lookup */
/*
 * snic_tgt_dev_release : Called on dropping last ref for snic_tgt object
 *
 * Device-model release callback installed by snic_tgt_create(); frees the
 * snic_tgt.  The target must already be unlinked from disc.tgt_list
 * (asserted below) — list_del_init() in the teardown paths guarantees it.
 */
void
snic_tgt_dev_release(struct device *dev)
{
	struct snic_tgt *tgt = dev_to_tgt(dev);

	SNIC_HOST_INFO(snic_tgt_to_shost(tgt),
		       "Target Device ID %d (%s) Permanently Deleted.\n",
		       tgt->id,
		       dev_name(dev));

	SNIC_BUG_ON(!list_empty(&tgt->list));

	kfree(tgt);
}
/*
 * snic_tgt_del : work function to delete snic_tgt
 *
 * Ordering matters here: wait for a pending scan, block child devices to
 * stop new IOs, abort outstanding IOs, unblock so anything queued can
 * flush, then remove the SCSI target and drop the device reference
 * (the snic_tgt itself is freed in snic_tgt_dev_release on last put).
 */
static void
snic_tgt_del(struct work_struct *work)
{
	struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work);
	struct Scsi_Host *shost = snic_tgt_to_shost(tgt);

	/* Let a queued scan_work finish before tearing the target down. */
	if (tgt->flags & SNIC_TGT_SCAN_PENDING)
		scsi_flush_work(shost);

	/* Block IOs on child devices, stops new IOs */
	scsi_target_block(&tgt->dev);

	/* Cleanup IOs */
	snic_tgt_scsi_abort_io(tgt);

	/* Unblock IOs now, to flush if there are any. */
	scsi_target_unblock(&tgt->dev, SDEV_TRANSPORT_OFFLINE);

	/* Delete SCSI Target and sdevs */
	scsi_remove_target(&tgt->dev); /* ?? */
	device_del(&tgt->dev);
	put_device(&tgt->dev);
} /* end of snic_tgt_del */
/* snic_tgt_create: checks for existence of snic_tgt, if it doesn't
 * it creates one.
 *
 * Allocates a snic_tgt for a firmware-reported target id, registers it
 * with the SML device tree and queues a scan.  Returns the (new or
 * existing) target, or NULL on allocation/device_add failure.
 */
static struct snic_tgt *
snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
{
	struct snic_tgt *tgt = NULL;
	unsigned long flags;
	int ret;

	tgt = snic_tgt_lookup(snic, tgtid);
	if (tgt) {
		/* update the information if required */
		return tgt;
	}

	tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
	if (!tgt) {
		SNIC_HOST_ERR(snic->shost, "Failure to allocate snic_tgt.\n");
		ret = -ENOMEM;

		return tgt;
	}

	INIT_LIST_HEAD(&tgt->list);
	tgt->id = le32_to_cpu(tgtid->tgt_id);
	tgt->channel = 0;
	SNIC_BUG_ON(le16_to_cpu(tgtid->tgt_type) > SNIC_TGT_SAN);
	tgt->tdata.typ = le16_to_cpu(tgtid->tgt_type);

	/*
	 * Plugging into SML Device Tree
	 */
	tgt->tdata.disc_id = 0;
	tgt->state = SNIC_TGT_STAT_INIT;
	device_initialize(&tgt->dev);
	/* Hold a shost ref while the target device points at it; dropped
	 * on device_add failure below or by the device core on del.
	 */
	tgt->dev.parent = get_device(&snic->shost->shost_gendev);
	tgt->dev.release = snic_tgt_dev_release;
	INIT_WORK(&tgt->scan_work, snic_scsi_scan_tgt);
	INIT_WORK(&tgt->del_work, snic_tgt_del);
	switch (tgt->tdata.typ) {
	case SNIC_TGT_DAS:
		dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;

	case SNIC_TGT_SAN:
		dev_set_name(&tgt->dev, "snic_san_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;

	default:
		/* Unknown types are named like DAS targets. */
		SNIC_HOST_INFO(snic->shost, "Target type Unknown Detected.\n");
		dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;
	}

	spin_lock_irqsave(snic->shost->host_lock, flags);
	list_add_tail(&tgt->list, &snic->disc.tgt_list);
	/* scsi_tgt_id is a driver-local monotonically increasing id. */
	tgt->scsi_tgt_id = snic->disc.nxt_tgt_id++;
	tgt->state = SNIC_TGT_STAT_ONLINE;
	spin_unlock_irqrestore(snic->shost->host_lock, flags);

	SNIC_HOST_INFO(snic->shost,
		       "Tgt %d, type = %s detected. Adding..\n",
		       tgt->id, snic_tgt_type_to_str(tgt->tdata.typ));

	ret = device_add(&tgt->dev);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "Snic Tgt: device_add, with err = %d\n",
			      ret);

		/* Undo: drop the shost ref, unlink, drop the last tgt ref
		 * (put_device triggers snic_tgt_dev_release -> kfree).
		 */
		put_device(&snic->shost->shost_gendev);
		spin_lock_irqsave(snic->shost->host_lock, flags);
		list_del(&tgt->list);
		spin_unlock_irqrestore(snic->shost->host_lock, flags);
		put_device(&tgt->dev);
		tgt = NULL;

		return tgt;
	}

	SNIC_HOST_INFO(snic->shost, "Scanning %s.\n", dev_name(&tgt->dev));

	scsi_queue_work(snic->shost, &tgt->scan_work);

	return tgt;
} /* end of snic_tgt_create */
/* Handler for discovery
 *
 * tgt_work worker: consumes the REPORT_TARGETS response stashed in
 * disc.rtgt_info and creates a snic_tgt for each reported id.  This
 * function owns the response buffer and frees it on every exit path.
 */
void
snic_handle_tgt_disc(struct work_struct *work)
{
	struct snic *snic = container_of(work, struct snic, tgt_work);
	struct snic_tgt_id *tgtid = NULL;
	struct snic_tgt *tgt = NULL;
	unsigned long flags;
	int i;

	/* Driver removal in progress: just free the response buffer. */
	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		kfree(snic->disc.rtgt_info);

		return;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	mutex_lock(&snic->disc.mutex);
	/* Discover triggered during disc in progress */
	if (snic->disc.req_cnt) {
		/* Another discovery request arrived meanwhile: discard this
		 * result and restart discovery from scratch.
		 */
		snic->disc.state = SNIC_DISC_DONE;
		snic->disc.req_cnt = 0;
		mutex_unlock(&snic->disc.mutex);
		kfree(snic->disc.rtgt_info);
		snic->disc.rtgt_info = NULL;

		SNIC_HOST_INFO(snic->shost, "tgt_disc: Discovery restart.\n");
		/* Start Discovery Again */
		snic_disc_start(snic);

		return;
	}

	tgtid = (struct snic_tgt_id *)snic->disc.rtgt_info;
	SNIC_BUG_ON(snic->disc.rtgt_cnt == 0 || tgtid == NULL);

	for (i = 0; i < snic->disc.rtgt_cnt; i++) {
		tgt = snic_tgt_create(snic, &tgtid[i]);
		if (!tgt) {
			int buf_sz = snic->disc.rtgt_cnt * sizeof(*tgtid);

			SNIC_HOST_ERR(snic->shost, "Failed to create tgt.\n");
			snic_hex_dump("rpt_tgt_rsp", (char *)tgtid, buf_sz);
			break;
		}
	}

	/* tgtid still references the buffer; it is freed below. */
	snic->disc.rtgt_info = NULL;
	snic->disc.state = SNIC_DISC_DONE;
	mutex_unlock(&snic->disc.mutex);

	SNIC_HOST_INFO(snic->shost, "Discovery Completed.\n");

	kfree(tgtid);
} /* end of snic_handle_tgt_disc */
/*
 * snic_report_tgt_cmpl_handler : REPORT_TARGETS completion handler.
 *
 * Decodes the firmware response, logs the reported target ids and hands
 * the response buffer off to tgt_work (snic_handle_tgt_disc) for target
 * creation.  Returns 0 on success, 1 when no targets were reported (in
 * which case the buffer is freed here instead).
 */
int
snic_report_tgt_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, cmpl_stat;
	u32 cmnd_id, hid, tgt_cnt = 0;
	ulong ctx;
	struct snic_req_info *rqi = NULL;
	struct snic_tgt_id *tgtid;
	int i, ret = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &cmpl_stat, &cmnd_id, &hid, &ctx);
	/* ctx carries the rqi pointer encoded in snic_report_tgt_init(). */
	rqi = (struct snic_req_info *) ctx;
	tgtid = (struct snic_tgt_id *) rqi->sge_va;

	tgt_cnt = le32_to_cpu(fwreq->u.rpt_tgts_cmpl.tgt_cnt);
	if (tgt_cnt == 0) {
		SNIC_HOST_ERR(snic->shost, "No Targets Found on this host.\n");
		ret = 1;

		goto end;
	}

	/* printing list of targets here */
	SNIC_HOST_INFO(snic->shost, "Target Count = %d\n", tgt_cnt);

	SNIC_BUG_ON(tgt_cnt > snic->fwinfo.max_tgts);

	for (i = 0; i < tgt_cnt; i++)
		SNIC_HOST_INFO(snic->shost,
			       "Tgt id = 0x%x\n",
			       le32_to_cpu(tgtid[i].tgt_id));

	/*
	 * Queue work for further processing,
	 * Response Buffer Memory is freed after creating targets
	 */
	snic->disc.rtgt_cnt = tgt_cnt;
	snic->disc.rtgt_info = (u8 *) tgtid;
	queue_work(snic_glob->event_q, &snic->tgt_work);
	ret = 0;

end:
	/* Unmap Response Buffer */
	snic_pci_unmap_rsp_buf(snic, rqi);
	/* On the no-targets path the buffer was never handed to tgt_work. */
	if (ret)
		kfree(tgtid);

	rqi->sge_va = 0;
	snic_release_untagged_req(snic, rqi);

	return ret;
} /* end of snic_report_tgt_cmpl_handler */
  348. /* Discovery init fn */
  349. void
  350. snic_disc_init(struct snic_disc *disc)
  351. {
  352. INIT_LIST_HEAD(&disc->tgt_list);
  353. mutex_init(&disc->mutex);
  354. disc->disc_id = 0;
  355. disc->nxt_tgt_id = 0;
  356. disc->state = SNIC_DISC_INIT;
  357. disc->req_cnt = 0;
  358. disc->rtgt_cnt = 0;
  359. disc->rtgt_info = NULL;
  360. disc->cb = NULL;
  361. } /* end of snic_disc_init */
  362. /* Discovery, uninit fn */
  363. void
  364. snic_disc_term(struct snic *snic)
  365. {
  366. struct snic_disc *disc = &snic->disc;
  367. mutex_lock(&disc->mutex);
  368. if (disc->req_cnt) {
  369. disc->req_cnt = 0;
  370. SNIC_SCSI_DBG(snic->shost, "Terminating Discovery.\n");
  371. }
  372. mutex_unlock(&disc->mutex);
  373. }
  374. /*
  375. * snic_disc_start: Discovery Start ...
  376. */
  377. int
  378. snic_disc_start(struct snic *snic)
  379. {
  380. struct snic_disc *disc = &snic->disc;
  381. unsigned long flags;
  382. int ret = 0;
  383. SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n");
  384. spin_lock_irqsave(&snic->snic_lock, flags);
  385. if (snic->in_remove) {
  386. spin_unlock_irqrestore(&snic->snic_lock, flags);
  387. SNIC_ERR("snic driver removal in progress ...\n");
  388. ret = 0;
  389. return ret;
  390. }
  391. spin_unlock_irqrestore(&snic->snic_lock, flags);
  392. mutex_lock(&disc->mutex);
  393. if (disc->state == SNIC_DISC_PENDING) {
  394. disc->req_cnt++;
  395. mutex_unlock(&disc->mutex);
  396. return ret;
  397. }
  398. disc->state = SNIC_DISC_PENDING;
  399. mutex_unlock(&disc->mutex);
  400. ret = snic_queue_report_tgt_req(snic);
  401. if (ret)
  402. SNIC_HOST_INFO(snic->shost, "Discovery Failed, err=%d.\n", ret);
  403. return ret;
  404. } /* end of snic_disc_start */
  405. /*
  406. * snic_disc_work :
  407. */
  408. void
  409. snic_handle_disc(struct work_struct *work)
  410. {
  411. struct snic *snic = container_of(work, struct snic, disc_work);
  412. int ret = 0;
  413. SNIC_HOST_INFO(snic->shost, "disc_work: Discovery\n");
  414. ret = snic_disc_start(snic);
  415. if (ret)
  416. goto disc_err;
  417. disc_err:
  418. SNIC_HOST_ERR(snic->shost,
  419. "disc_work: Discovery Failed w/ err = %d\n",
  420. ret);
  421. } /* end of snic_disc_work */
/*
 * snic_tgt_del_all : cleanup all snic targets
 * Called on unbinding the interface
 *
 * Marks every target for deletion and queues its del_work, then waits
 * for the event workqueue to drain so all targets are gone on return.
 */
void
snic_tgt_del_all(struct snic *snic)
{
	struct snic_tgt *tgt = NULL;
	struct list_head *cur, *nxt;
	unsigned long flags;

	/* Finish any queued scan work before deleting targets. */
	scsi_flush_work(snic->shost);

	/* disc.mutex serializes against discovery; host_lock guards the
	 * target list itself.
	 */
	mutex_lock(&snic->disc.mutex);
	spin_lock_irqsave(snic->shost->host_lock, flags);

	list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
		tgt = list_entry(cur, struct snic_tgt, list);
		tgt->state = SNIC_TGT_STAT_DEL;
		/* list_del_init keeps tgt->list empty for the release-path
		 * SNIC_BUG_ON in snic_tgt_dev_release().
		 */
		list_del_init(&tgt->list);
		SNIC_HOST_INFO(snic->shost, "Tgt %d q'ing for del\n", tgt->id);
		queue_work(snic_glob->event_q, &tgt->del_work);
		tgt = NULL;
	}
	spin_unlock_irqrestore(snic->shost->host_lock, flags);
	mutex_unlock(&snic->disc.mutex);

	/* Wait for every queued del_work to complete. */
	flush_workqueue(snic_glob->event_q);
} /* end of snic_tgt_del_all */