snic_main.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. // Copyright 2014 Cisco Systems, Inc. All rights reserved.
  3. #include <linux/module.h>
  4. #include <linux/mempool.h>
  5. #include <linux/string.h>
  6. #include <linux/slab.h>
  7. #include <linux/errno.h>
  8. #include <linux/init.h>
  9. #include <linux/pci.h>
  10. #include <linux/skbuff.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/spinlock.h>
  13. #include <linux/workqueue.h>
  14. #include <scsi/scsi_host.h>
  15. #include <scsi/scsi_tcq.h>
  16. #include "snic.h"
  17. #include "snic_fwint.h"
  18. #define PCI_DEVICE_ID_CISCO_SNIC 0x0046
/* Supported devices by snic module */
static struct pci_device_id snic_id_table[] = {
	{PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },	/* 0x1137 is the Cisco PCI vendor ID */
	{ 0, } /* end of table */
};
  24. unsigned int snic_log_level = 0x0;
  25. module_param(snic_log_level, int, S_IRUGO|S_IWUSR);
  26. MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");
  27. #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
  28. unsigned int snic_trace_max_pages = 16;
  29. module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
  30. MODULE_PARM_DESC(snic_trace_max_pages,
  31. "Total allocated memory pages for snic trace buffer");
  32. #endif
  33. unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
  34. module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
  35. MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");
  36. /*
  37. * snic_slave_alloc : callback function to SCSI Mid Layer, called on
  38. * scsi device initialization.
  39. */
  40. static int
  41. snic_slave_alloc(struct scsi_device *sdev)
  42. {
  43. struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
  44. if (!tgt || snic_tgt_chkready(tgt))
  45. return -ENXIO;
  46. return 0;
  47. }
  48. /*
  49. * snic_slave_configure : callback function to SCSI Mid Layer, called on
  50. * scsi device initialization.
  51. */
  52. static int
  53. snic_slave_configure(struct scsi_device *sdev)
  54. {
  55. struct snic *snic = shost_priv(sdev->host);
  56. u32 qdepth = 0, max_ios = 0;
  57. int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ;
  58. /* Set Queue Depth */
  59. max_ios = snic_max_qdepth;
  60. qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH);
  61. scsi_change_queue_depth(sdev, qdepth);
  62. if (snic->fwinfo.io_tmo > 1)
  63. tmo = snic->fwinfo.io_tmo * HZ;
  64. /* FW requires extended timeouts */
  65. blk_queue_rq_timeout(sdev->request_queue, tmo);
  66. return 0;
  67. }
  68. static int
  69. snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
  70. {
  71. struct snic *snic = shost_priv(sdev->host);
  72. int qsz = 0;
  73. qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
  74. if (qsz < sdev->queue_depth)
  75. atomic64_inc(&snic->s_stats.misc.qsz_rampdown);
  76. else if (qsz > sdev->queue_depth)
  77. atomic64_inc(&snic->s_stats.misc.qsz_rampup);
  78. atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth);
  79. scsi_change_queue_depth(sdev, qsz);
  80. return sdev->queue_depth;
  81. }
/* SCSI host template: wires the midlayer entry points to snic handlers */
static struct scsi_host_template snic_host_template = {
	.module = THIS_MODULE,
	.name = SNIC_DRV_NAME,
	.queuecommand = snic_queuecommand,
	.eh_abort_handler = snic_abort_cmd,
	.eh_device_reset_handler = snic_device_reset,
	.eh_host_reset_handler = snic_host_reset,
	.slave_alloc = snic_slave_alloc,
	.slave_configure = snic_slave_configure,
	.change_queue_depth = snic_change_queue_depth,
	.this_id = -1,
	.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
	.can_queue = SNIC_MAX_IO_REQ,
	.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0x800,	/* 0x800 sectors per request */
	.shost_groups = snic_host_groups,
	.track_queue_depth = 1,
	/* per-command driver-private area carved out by the midlayer */
	.cmd_size = sizeof(struct snic_internal_io_state),
	.proc_name = "snic_scsi",
};
  102. /*
  103. * snic_handle_link_event : Handles link events such as link up/down/error
  104. */
  105. void
  106. snic_handle_link_event(struct snic *snic)
  107. {
  108. unsigned long flags;
  109. spin_lock_irqsave(&snic->snic_lock, flags);
  110. if (snic->stop_link_events) {
  111. spin_unlock_irqrestore(&snic->snic_lock, flags);
  112. return;
  113. }
  114. spin_unlock_irqrestore(&snic->snic_lock, flags);
  115. queue_work(snic_glob->event_q, &snic->link_work);
  116. } /* end of snic_handle_link_event */
  117. /*
  118. * snic_notify_set : sets notification area
  119. * This notification area is to receive events from fw
  120. * Note: snic supports only MSIX interrupts, in which we can just call
  121. * svnic_dev_notify_set directly
  122. */
  123. static int
  124. snic_notify_set(struct snic *snic)
  125. {
  126. int ret = 0;
  127. enum vnic_dev_intr_mode intr_mode;
  128. intr_mode = svnic_dev_get_intr_mode(snic->vdev);
  129. if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
  130. ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
  131. } else {
  132. SNIC_HOST_ERR(snic->shost,
  133. "Interrupt mode should be setup before devcmd notify set %d\n",
  134. intr_mode);
  135. ret = -1;
  136. }
  137. return ret;
  138. } /* end of snic_notify_set */
  139. /*
  140. * snic_dev_wait : polls vnic open status.
  141. */
  142. static int
  143. snic_dev_wait(struct vnic_dev *vdev,
  144. int (*start)(struct vnic_dev *, int),
  145. int (*finished)(struct vnic_dev *, int *),
  146. int arg)
  147. {
  148. unsigned long time;
  149. int ret, done;
  150. int retry_cnt = 0;
  151. ret = start(vdev, arg);
  152. if (ret)
  153. return ret;
  154. /*
  155. * Wait for func to complete...2 seconds max.
  156. *
  157. * Sometimes schedule_timeout_uninterruptible take long time
  158. * to wakeup, which results skipping retry. The retry counter
  159. * ensures to retry at least two times.
  160. */
  161. time = jiffies + (HZ * 2);
  162. do {
  163. ret = finished(vdev, &done);
  164. if (ret)
  165. return ret;
  166. if (done)
  167. return 0;
  168. schedule_timeout_uninterruptible(HZ/10);
  169. ++retry_cnt;
  170. } while (time_after(time, jiffies) || (retry_cnt < 3));
  171. return -ETIMEDOUT;
  172. } /* end of snic_dev_wait */
/*
 * snic_cleanup: called by snic_remove
 * Stops the snic device, masks all interrupts, Completed CQ entries are
 * drained. Posted WQ/RQ/Copy-WQ entries are cleanup
 *
 * The order below matters: the device is disabled and interrupts masked
 * before any queue is drained or cleaned.
 */
static int
snic_cleanup(struct snic *snic)
{
	unsigned int i;
	int ret;

	/* Quiesce the device and silence its interrupts first */
	svnic_dev_disable(snic->vdev);
	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_disable(&snic->wq[i]);
		if (ret)
			return ret;
	}

	/* Clean up completed IOs */
	snic_fwcq_cmpl_handler(snic, -1);
	snic_wq_cmpl_handler(snic, -1);

	/* Clean up the IOs that have not completed */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);

	for (i = 0; i < snic->cq_count; i++)
		svnic_cq_clean(&snic->cq[i]);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_clean(&snic->intr[i]);

	/* Cleanup snic specific requests */
	snic_free_all_untagged_reqs(snic);

	/* Cleanup Pending SCSI commands */
	snic_shutdown_scsi_cleanup(snic);

	/* Free the per-instance request mempools created in snic_probe() */
	for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
		mempool_destroy(snic->req_pool[i]);

	return 0;
} /* end of snic_cleanup */
  209. static void
  210. snic_iounmap(struct snic *snic)
  211. {
  212. if (snic->bar0.vaddr)
  213. iounmap(snic->bar0.vaddr);
  214. }
  215. /*
  216. * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
  217. */
  218. static int
  219. snic_vdev_open_done(struct vnic_dev *vdev, int *done)
  220. {
  221. struct snic *snic = svnic_dev_priv(vdev);
  222. int ret;
  223. int nretries = 5;
  224. do {
  225. ret = svnic_dev_open_done(vdev, done);
  226. if (ret == 0)
  227. break;
  228. SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n");
  229. } while (nretries--);
  230. return ret;
  231. } /* end of snic_vdev_open_done */
  232. /*
  233. * snic_add_host : registers scsi host with ML
  234. */
  235. static int
  236. snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
  237. {
  238. int ret = 0;
  239. ret = scsi_add_host(shost, &pdev->dev);
  240. if (ret) {
  241. SNIC_HOST_ERR(shost,
  242. "snic: scsi_add_host failed. %d\n",
  243. ret);
  244. return ret;
  245. }
  246. SNIC_BUG_ON(shost->work_q != NULL);
  247. snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d",
  248. shost->host_no);
  249. shost->work_q = create_singlethread_workqueue(shost->work_q_name);
  250. if (!shost->work_q) {
  251. SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");
  252. ret = -ENOMEM;
  253. }
  254. return ret;
  255. } /* end of snic_add_host */
  256. static void
  257. snic_del_host(struct Scsi_Host *shost)
  258. {
  259. if (!shost->work_q)
  260. return;
  261. destroy_workqueue(shost->work_q);
  262. shost->work_q = NULL;
  263. scsi_remove_host(shost);
  264. }
/* snic_get_state : returns the current state recorded by snic_set_state() */
int
snic_get_state(struct snic *snic)
{
	return atomic_read(&snic->state);
}
/* snic_set_state : logs and records a state transition (e.g. INIT -> ONLINE) */
void
snic_set_state(struct snic *snic, enum snic_state state)
{
	SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n",
		       snic_state_to_str(snic_get_state(snic)),
		       snic_state_to_str(state));

	atomic_set(&snic->state, state);
}
/*
 * snic_probe : Initialize the snic interface.
 *
 * Brings up one sNIC PCI function: allocates the Scsi_Host/snic pair,
 * maps BAR0, discovers and opens the vNIC device, allocates queues,
 * interrupts and request mempools, then registers the host with the
 * SCSI midlayer and starts target discovery.
 *
 * Returns 0 on success or a negative errno; on failure, the error-unwind
 * labels at the bottom release resources in reverse order of acquisition.
 */
static int
snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *shost;
	struct snic *snic;
	mempool_t *pool;
	unsigned long flags;
	u32 max_ios = 0;
	int ret, i;

	/* Device Information */
	SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
		  pdev->vendor, pdev->device, pdev->subsystem_vendor,
		  pdev->subsystem_device);

	SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	/*
	 * Allocate SCSI Host and setup association between host, and snic
	 */
	shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
	if (!shost) {
		SNIC_ERR("Unable to alloc scsi_host\n");
		ret = -ENOMEM;

		goto prob_end;
	}
	snic = shost_priv(shost);
	snic->shost = shost;

	snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
		 shost->host_no);

	SNIC_HOST_INFO(shost,
		       "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
		       shost->host_no, snic, shost, pdev->bus->number,
		       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Per snic debugfs init */
	snic_stats_debugfs_init(snic);
#endif

	/* Setup PCI Resources */
	pci_set_drvdata(pdev, snic);
	snic->pdev = pdev;

	ret = pci_enable_device(pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot enable PCI Resources, aborting : %d\n",
			      ret);

		goto err_free_snic;
	}

	ret = pci_request_regions(pdev, SNIC_DRV_NAME);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot obtain PCI Resources, aborting : %d\n",
			      ret);

		goto err_pci_disable;
	}

	pci_set_master(pdev);

	/*
	 * Query PCI Controller on system for DMA addressing
	 * limitation for the device. Try 43-bit first, and
	 * fail to 32-bit.
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "No Usable DMA Configuration, aborting %d\n",
				      ret);

			goto err_rel_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
		ret = -ENODEV;

		goto err_rel_regions;
	}

	snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	if (!snic->bar0.vaddr) {
		SNIC_HOST_ERR(shost,
			      "Cannot memory map BAR0 res hdr aborting.\n");
		ret = -ENODEV;

		goto err_rel_regions;
	}

	snic->bar0.bus_addr = pci_resource_start(pdev, 0);
	snic->bar0.len = pci_resource_len(pdev, 0);
	SNIC_BUG_ON(snic->bar0.bus_addr == 0);

	/* Devcmd2 Resource Allocation and Initialization */
	snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
	if (!snic->vdev) {
		SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");
		ret = -ENODEV;

		goto err_iounmap;
	}

	ret = svnic_dev_cmd_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);

		goto err_vnic_unreg;
	}

	/* Open the vNIC, polling for completion (2s bound, see snic_dev_wait) */
	ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev open failed, aborting. %d\n",
			      ret);

		goto err_vnic_unreg;
	}

	ret = svnic_dev_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev init failed. aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Get vNIC information */
	ret = snic_get_vnic_config(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Get vNIC configuration failed, aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs */
	max_ios = snic->config.io_throttle_count;
	if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
		shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
					 max_t(u32, SNIC_MIN_IO_REQ, max_ios));

	snic->max_tag_id = shost->can_queue;

	shost->max_lun = snic->config.luns_per_tgt;
	shost->max_id = SNIC_MAX_TARGET;

	shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/

	snic_get_res_counts(snic);

	/*
	 * Assumption: Only MSIx is supported
	 */
	ret = snic_set_intr_mode(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to set intr mode aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	ret = snic_alloc_vnic_res(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc vNIC resources aborting. %d\n",
			      ret);

		goto err_clear_intr;
	}

	/* Initialize specific lists */
	INIT_LIST_HEAD(&snic->list);

	/*
	 * spl_cmd_list for maintaining snic specific cmds
	 * such as EXCH_VER_REQ, REPORT_TARGETS etc
	 */
	INIT_LIST_HEAD(&snic->spl_cmd_list);
	spin_lock_init(&snic->spl_cmd_lock);

	/* initialize all snic locks */
	spin_lock_init(&snic->snic_lock);

	for (i = 0; i < SNIC_WQ_MAX; i++)
		spin_lock_init(&snic->wq_lock[i]);

	for (i = 0; i < SNIC_IO_LOCKS; i++)
		spin_lock_init(&snic->io_req_lock[i]);

	/* Per-instance mempools (min_nr = 2 reserved elements each) */
	pool = mempool_create_slab_pool(2,
					snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_res;
	}

	snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;

	pool = mempool_create_slab_pool(2,
					snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_dflt_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;

	pool = mempool_create_slab_pool(2,
					snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");

		ret = -ENOMEM;
		goto err_free_max_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_TM_CACHE] = pool;

	/* Initialize snic state */
	atomic_set(&snic->state, SNIC_INIT);

	atomic_set(&snic->ios_inflight, 0);

	/* Setup notification buffer area */
	ret = snic_notify_set(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc notify buffer aborting. %d\n",
			      ret);

		goto err_free_tmreq_pool;
	}

	/* Publish this instance on the global snic list */
	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_add_tail(&snic->list, &snic_glob->snic_list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_disc_init(&snic->disc);
	INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
	INIT_WORK(&snic->disc_work, snic_handle_disc);
	INIT_WORK(&snic->link_work, snic_handle_link);

	/* Enable all queues */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_enable(&snic->wq[i]);

	ret = svnic_dev_enable_wait(snic->vdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev enable failed w/ error %d\n",
			      ret);

		goto err_vdev_enable;
	}

	ret = snic_request_intr(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);

		goto err_req_intr;
	}

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_unmask(&snic->intr[i]);

	/* Get snic params */
	ret = snic_get_conf(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to get snic io config from FW w err %d\n",
			      ret);

		goto err_get_conf;
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add shost to SCSI
	 */
	ret = snic_add_host(shost, pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Adding scsi host Failed ... exiting. %d\n",
			      ret);

		goto err_get_conf;
	}

	snic_set_state(snic, SNIC_ONLINE);

	ret = snic_disc_start(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
			      ret);

		goto err_get_conf;
	}

	SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");

	return 0;

	/* Error unwind: each label undoes the step acquired just before it */
err_get_conf:
	snic_free_all_untagged_reqs(snic);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	snic_free_intr(snic);

err_req_intr:
	svnic_dev_disable(snic->vdev);

err_vdev_enable:
	svnic_dev_notify_unset(snic->vdev);

	for (i = 0; i < snic->wq_count; i++) {
		int rc = 0;

		rc = svnic_wq_disable(&snic->wq[i]);
		if (rc) {
			SNIC_HOST_ERR(shost,
				      "WQ Disable Failed w/ err = %d\n", rc);

			break;
		}
	}
	snic_del_host(snic->shost);

err_free_tmreq_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);

err_free_max_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);

err_free_dflt_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);

err_free_res:
	snic_free_vnic_res(snic);

err_clear_intr:
	snic_clear_intr_mode(snic);

err_dev_close:
	svnic_dev_close(snic->vdev);

err_vnic_unreg:
	svnic_dev_unregister(snic->vdev);

err_iounmap:
	snic_iounmap(snic);

err_rel_regions:
	pci_release_regions(pdev);

err_pci_disable:
	pci_disable_device(pdev);

err_free_snic:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	/* frees both Scsi_Host and the embedded snic */
	scsi_host_put(shost);
	pci_set_drvdata(pdev, NULL);

prob_end:
	SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	return ret;
} /* end of snic_probe */
/*
 * snic_remove : invoked on unbinding the interface to cleanup the
 * resources allocated in snic_probe on initialization.
 */
static void
snic_remove(struct pci_dev *pdev)
{
	struct snic *snic = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!snic) {
		SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
			  pdev->bus->number, PCI_SLOT(pdev->devfn),
			  PCI_FUNC(pdev->devfn));

		return;
	}

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events. ISR and other threads
	 * that can queue work items will also stop creating work
	 * items on the snic workqueue
	 */
	snic_set_state(snic, SNIC_OFFLINE);
	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->stop_link_events = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/* Drain any link/discovery work already queued */
	flush_workqueue(snic_glob->event_q);
	snic_disc_term(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->in_remove = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/*
	 * This stops the snic device, masks all interrupts, Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleanup
	 */
	snic_cleanup(snic);

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_del(&snic->list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_tgt_del_all(snic);
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	snic_del_host(snic->shost);

	/* Release vNIC/PCI resources in reverse order of snic_probe() */
	svnic_dev_notify_unset(snic->vdev);
	snic_free_intr(snic);
	snic_free_vnic_res(snic);
	snic_clear_intr_mode(snic);
	svnic_dev_close(snic->vdev);
	svnic_dev_unregister(snic->vdev);
	snic_iounmap(snic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	/* this frees Scsi_Host and snic memory (continuous chunk) */
	scsi_host_put(snic->shost);
} /* end of snic_remove */
/* Single global context shared by all snic instances */
struct snic_global *snic_glob;
  638. /*
  639. * snic_global_data_init: Initialize SNIC Global Data
  640. * Notes: All the global lists, variables should be part of global data
  641. * this helps in debugging.
  642. */
  643. static int
  644. snic_global_data_init(void)
  645. {
  646. int ret = 0;
  647. struct kmem_cache *cachep;
  648. ssize_t len = 0;
  649. snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);
  650. if (!snic_glob) {
  651. SNIC_ERR("Failed to allocate Global Context.\n");
  652. ret = -ENOMEM;
  653. goto gdi_end;
  654. }
  655. #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
  656. /* Debugfs related Initialization */
  657. /* Create debugfs entries for snic */
  658. snic_debugfs_init();
  659. /* Trace related Initialization */
  660. /* Allocate memory for trace buffer */
  661. ret = snic_trc_init();
  662. if (ret < 0) {
  663. SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
  664. snic_trc_free();
  665. /* continue even if it fails */
  666. }
  667. #endif
  668. INIT_LIST_HEAD(&snic_glob->snic_list);
  669. spin_lock_init(&snic_glob->snic_list_lock);
  670. /* Create a cache for allocation of snic_host_req+default size ESGLs */
  671. len = sizeof(struct snic_req_info);
  672. len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
  673. cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
  674. SLAB_HWCACHE_ALIGN, NULL);
  675. if (!cachep) {
  676. SNIC_ERR("Failed to create snic default sgl slab\n");
  677. ret = -ENOMEM;
  678. goto err_dflt_req_slab;
  679. }
  680. snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;
  681. /* Create a cache for allocation of max size Extended SGLs */
  682. len = sizeof(struct snic_req_info);
  683. len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
  684. cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
  685. SLAB_HWCACHE_ALIGN, NULL);
  686. if (!cachep) {
  687. SNIC_ERR("Failed to create snic max sgl slab\n");
  688. ret = -ENOMEM;
  689. goto err_max_req_slab;
  690. }
  691. snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;
  692. len = sizeof(struct snic_host_req);
  693. cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
  694. SLAB_HWCACHE_ALIGN, NULL);
  695. if (!cachep) {
  696. SNIC_ERR("Failed to create snic tm req slab\n");
  697. ret = -ENOMEM;
  698. goto err_tmreq_slab;
  699. }
  700. snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;
  701. /* snic_event queue */
  702. snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
  703. if (!snic_glob->event_q) {
  704. SNIC_ERR("snic event queue create failed\n");
  705. ret = -ENOMEM;
  706. goto err_eventq;
  707. }
  708. return ret;
  709. err_eventq:
  710. kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
  711. err_tmreq_slab:
  712. kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
  713. err_max_req_slab:
  714. kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
  715. err_dflt_req_slab:
  716. #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
  717. snic_trc_free();
  718. snic_debugfs_term();
  719. #endif
  720. kfree(snic_glob);
  721. snic_glob = NULL;
  722. gdi_end:
  723. return ret;
  724. } /* end of snic_glob_init */
/*
 * snic_global_data_cleanup : Frees SNIC Global Data
 */
static void
snic_global_data_cleanup(void)
{
	SNIC_BUG_ON(snic_glob == NULL);

	/* Tear down in reverse order of snic_global_data_init() */
	destroy_workqueue(snic_glob->event_q);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Freeing Trace Resources */
	snic_trc_free();

	/* Freeing Debugfs Resources */
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;
} /* end of snic_glob_cleanup */
/* PCI driver glue: binds snic_probe/snic_remove to the Cisco sNIC device */
static struct pci_driver snic_driver = {
	.name = SNIC_DRV_NAME,
	.id_table = snic_id_table,
	.probe = snic_probe,
	.remove = snic_remove,
};
  751. static int __init
  752. snic_init_module(void)
  753. {
  754. int ret = 0;
  755. #ifndef __x86_64__
  756. SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n");
  757. add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
  758. #endif
  759. SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION);
  760. ret = snic_global_data_init();
  761. if (ret) {
  762. SNIC_ERR("Failed to Initialize Global Data.\n");
  763. return ret;
  764. }
  765. ret = pci_register_driver(&snic_driver);
  766. if (ret < 0) {
  767. SNIC_ERR("PCI driver register error\n");
  768. goto err_pci_reg;
  769. }
  770. return ret;
  771. err_pci_reg:
  772. snic_global_data_cleanup();
  773. return ret;
  774. }
/*
 * snic_cleanup_module : module unload hook; unregistering the PCI driver
 * invokes snic_remove() for each bound device, after which the global
 * data can be freed.
 */
static void __exit
snic_cleanup_module(void)
{
	pci_unregister_driver(&snic_driver);
	snic_global_data_cleanup();
}
  781. module_init(snic_init_module);
  782. module_exit(snic_cleanup_module);
  783. MODULE_LICENSE("GPL v2");
  784. MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
  785. MODULE_VERSION(SNIC_DRV_VERSION);
  786. MODULE_DEVICE_TABLE(pci, snic_id_table);
  787. MODULE_AUTHOR("Narsimhulu Musini <[email protected]>, "
  788. "Sesidhar Baddela <[email protected]>");