// SPDX-License-Identifier: GPL-2.0-only
/*
 *  hosts.c Copyright (C) 1992 Drew Eckhardt
 *          Copyright (C) 1993, 1994, 1995 Eric Youngdale
 *          Copyright (C) 2002-2003 Christoph Hellwig
 *
 *  mid to lowlevel SCSI driver interface
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <[email protected]>
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *  Added QLOGIC QLA1280 SCSI controller kernel host support.
 *     August 4, 1999 Fred Lewis, Intel DuPont
 *
 *  Updated to reflect the new initialization scheme for the higher
 *  level of scsi drivers (sd/sr/st)
 *  September 17, 2000 Torben Mathiasen <[email protected]>
 *
 *  Restructured scsi_host lists and associated functions.
 *  September 04, 2002 Mike Anderson ([email protected])
 */

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/transport_class.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

static int shost_eh_deadline = -1;

module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(eh_deadline,
		 "SCSI EH timeout in seconds (should be between 0 and 2^31-1)");

static DEFINE_IDA(host_index_ida);

static void scsi_host_cls_release(struct device *dev)
{
	put_device(&class_to_shost(dev)->shost_gendev);
}

static struct class shost_class = {
	.name		= "scsi_host",
	.dev_release	= scsi_host_cls_release,
	.dev_groups	= scsi_shost_groups,
};

/**
 * scsi_host_set_state - Take the given host through the host state model.
 * @shost:	scsi host to change the state of.
 * @state:	state to change to.
 *
 * Returns zero if successful or an error if the requested
 * transition is illegal.
 **/
int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
{
	enum scsi_host_state oldstate = shost->shost_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SHOST_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SHOST_RUNNING:
		switch (oldstate) {
		case SHOST_CREATED:
		case SHOST_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_RECOVERY:
		switch (oldstate) {
		case SHOST_RUNNING:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_CANCEL:
		switch (oldstate) {
		case SHOST_CREATED:
		case SHOST_RUNNING:
		case SHOST_CANCEL_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_DEL:
		switch (oldstate) {
		case SHOST_CANCEL:
		case SHOST_DEL_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_CANCEL_RECOVERY:
		switch (oldstate) {
		case SHOST_CANCEL:
		case SHOST_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_DEL_RECOVERY:
		switch (oldstate) {
		case SHOST_CANCEL_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;
	}
	shost->shost_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				shost_printk(KERN_ERR, shost,
					     "Illegal host state transition %s->%s\n",
					     scsi_host_state_name(oldstate),
					     scsi_host_state_name(state)));
	return -EINVAL;
}
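
/*
 * Summary of the legal transitions encoded above (illustrative, derived
 * directly from the switch statement):
 *
 *	SHOST_RUNNING         <- SHOST_CREATED, SHOST_RECOVERY
 *	SHOST_RECOVERY        <- SHOST_RUNNING
 *	SHOST_CANCEL          <- SHOST_CREATED, SHOST_RUNNING, SHOST_CANCEL_RECOVERY
 *	SHOST_CANCEL_RECOVERY <- SHOST_CANCEL, SHOST_RECOVERY
 *	SHOST_DEL             <- SHOST_CANCEL, SHOST_DEL_RECOVERY
 *	SHOST_DEL_RECOVERY    <- SHOST_CANCEL_RECOVERY
 *
 * scsi_remove_host() below takes shost->host_lock around its transitions,
 * roughly:
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	if (scsi_host_set_state(shost, SHOST_CANCEL))
 *		...	// fall back or bail out on an illegal transition
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 */
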
/**
 * scsi_remove_host - remove a scsi host
 * @shost:	a pointer to a scsi host to remove
 **/
void scsi_remove_host(struct Scsi_Host *shost)
{
	unsigned long flags;

	mutex_lock(&shost->scan_mutex);
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_CANCEL))
		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
			spin_unlock_irqrestore(shost->host_lock, flags);
			mutex_unlock(&shost->scan_mutex);
			return;
		}
	spin_unlock_irqrestore(shost->host_lock, flags);

	scsi_autopm_get_host(shost);
	flush_workqueue(shost->tmf_work_q);
	scsi_forget_host(shost);
	mutex_unlock(&shost->scan_mutex);
	scsi_proc_host_rm(shost);
	scsi_proc_hostdir_rm(shost->hostt);

	/*
	 * New SCSI devices cannot be attached anymore because of the SCSI host
	 * state so drop the tag set refcnt. Wait until the tag set refcnt drops
	 * to zero because .exit_cmd_priv implementations may need the host
	 * pointer.
	 */
	kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
	wait_for_completion(&shost->tagset_freed);

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_DEL))
		BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
	spin_unlock_irqrestore(shost->host_lock, flags);

	transport_unregister_device(&shost->shost_gendev);
	device_unregister(&shost->shost_dev);
	device_del(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_remove_host);
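
/*
 * Illustrative teardown sketch (not part of this file): a low-level driver's
 * remove path typically pairs scsi_remove_host() with a final scsi_host_put()
 * to drop the reference taken by scsi_host_alloc(). "my_hba" is a hypothetical
 * driver-private structure holding the shost pointer.
 *
 *	scsi_remove_host(my_hba->shost);
 *	scsi_host_put(my_hba->shost);
 */
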
/**
 * scsi_add_host_with_dma - add a scsi host with dma device
 * @shost:	scsi host pointer to add
 * @dev:	a struct device of type scsi class
 * @dma_dev:	dma device for the host
 *
 * Note: You rarely need this variant unless you're in a virtualised
 * host environment; most drivers should use the simpler scsi_add_host()
 * instead.
 *
 * Return value:
 * 	0 on success / != 0 for error
 **/
int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
			   struct device *dma_dev)
{
	struct scsi_host_template *sht = shost->hostt;
	int error = -EINVAL;

	shost_printk(KERN_INFO, shost, "%s\n",
			sht->info ? sht->info(shost) : sht->name);

	if (!shost->can_queue) {
		shost_printk(KERN_ERR, shost,
			     "can_queue = 0 no longer supported\n");
		goto fail;
	}

	/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
	shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
				   shost->can_queue);

	error = scsi_init_sense_cache(shost);
	if (error)
		goto fail;

	if (!shost->shost_gendev.parent)
		shost->shost_gendev.parent = dev ? dev : &platform_bus;
	if (!dma_dev)
		dma_dev = shost->shost_gendev.parent;

	shost->dma_dev = dma_dev;

	if (dma_dev->dma_mask) {
		shost->max_sectors = min_t(unsigned int, shost->max_sectors,
				dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT);
	}

	error = scsi_mq_setup_tags(shost);
	if (error)
		goto fail;

	kref_init(&shost->tagset_refcnt);
	init_completion(&shost->tagset_freed);

	/*
	 * Increase usage count temporarily here so that calling
	 * scsi_autopm_put_host() will trigger runtime idle if there is
	 * nothing else preventing suspending the device.
	 */
	pm_runtime_get_noresume(&shost->shost_gendev);
	pm_runtime_set_active(&shost->shost_gendev);
	pm_runtime_enable(&shost->shost_gendev);
	device_enable_async_suspend(&shost->shost_gendev);

	error = device_add(&shost->shost_gendev);
	if (error)
		goto out_disable_runtime_pm;

	scsi_host_set_state(shost, SHOST_RUNNING);
	get_device(shost->shost_gendev.parent);

	device_enable_async_suspend(&shost->shost_dev);

	get_device(&shost->shost_gendev);
	error = device_add(&shost->shost_dev);
	if (error)
		goto out_del_gendev;

	if (shost->transportt->host_size) {
		shost->shost_data = kzalloc(shost->transportt->host_size,
					    GFP_KERNEL);
		if (shost->shost_data == NULL) {
			error = -ENOMEM;
			goto out_del_dev;
		}
	}

	if (shost->transportt->create_work_queue) {
		snprintf(shost->work_q_name, sizeof(shost->work_q_name),
			 "scsi_wq_%d", shost->host_no);
		shost->work_q = alloc_workqueue("%s",
			WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
			1, shost->work_q_name);

		if (!shost->work_q) {
			error = -EINVAL;
			goto out_del_dev;
		}
	}

	error = scsi_sysfs_add_host(shost);
	if (error)
		goto out_del_dev;

	scsi_proc_host_add(shost);
	scsi_autopm_put_host(shost);
	return error;

	/*
	 * Any host allocation in this function will be freed in
	 * scsi_host_dev_release().
	 */
 out_del_dev:
	device_del(&shost->shost_dev);
 out_del_gendev:
	/*
	 * Host state is SHOST_RUNNING so we have to explicitly release
	 * ->shost_dev.
	 */
	put_device(&shost->shost_dev);
	device_del(&shost->shost_gendev);
 out_disable_runtime_pm:
	device_disable_async_suspend(&shost->shost_gendev);
	pm_runtime_disable(&shost->shost_gendev);
	pm_runtime_set_suspended(&shost->shost_gendev);
	pm_runtime_put_noidle(&shost->shost_gendev);
	kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
 fail:
	return error;
}
EXPORT_SYMBOL(scsi_add_host_with_dma);
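
/*
 * Illustrative probe sketch (not part of this file): a PCI low-level driver
 * typically allocates a host, fills in limits, then registers it. The
 * template "my_template", the private struct "my_hba", the "put_host" label
 * and the use of &pdev->dev as both parent and DMA device are hypothetical.
 *
 *	shost = scsi_host_alloc(&my_template, sizeof(struct my_hba));
 *	if (!shost)
 *		return -ENOMEM;
 *	shost->max_id = 16;
 *	if (scsi_add_host_with_dma(shost, &pdev->dev, &pdev->dev))
 *		goto put_host;
 *	scsi_scan_host(shost);
 */
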
static void scsi_host_dev_release(struct device *dev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct device *parent = dev->parent;

	/* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
	rcu_barrier();

	if (shost->tmf_work_q)
		destroy_workqueue(shost->tmf_work_q);
	if (shost->ehandler)
		kthread_stop(shost->ehandler);
	if (shost->work_q)
		destroy_workqueue(shost->work_q);

	if (shost->shost_state == SHOST_CREATED) {
		/*
		 * Free the shost_dev device name here if scsi_host_alloc()
		 * and scsi_host_put() have been called but neither
		 * scsi_add_host() nor scsi_remove_host() has been called.
		 * This avoids leaking the memory allocated for the shost_dev
		 * name.
		 */
		kfree(dev_name(&shost->shost_dev));
	}
	kfree(shost->shost_data);

	ida_free(&host_index_ida, shost->host_no);

	if (shost->shost_state != SHOST_CREATED)
		put_device(parent);
	kfree(shost);
}

static struct device_type scsi_host_type = {
	.name =		"scsi_host",
	.release =	scsi_host_dev_release,
};

/**
 * scsi_host_alloc - allocate a scsi host adapter instance.
 * @sht:	pointer to scsi host template
 * @privsize:	extra bytes to allocate for driver
 *
 * Note:
 * 	Allocate a new Scsi_Host and perform basic initialization.
 * 	The host is not published to the scsi midlayer until scsi_add_host
 *	is called.
 *
 * Return value:
 * 	Pointer to a new Scsi_Host
 **/
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
	struct Scsi_Host *shost;
	int index;

	shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
	if (!shost)
		return NULL;

	shost->host_lock = &shost->default_lock;
	spin_lock_init(shost->host_lock);
	shost->shost_state = SHOST_CREATED;
	INIT_LIST_HEAD(&shost->__devices);
	INIT_LIST_HEAD(&shost->__targets);
	INIT_LIST_HEAD(&shost->eh_abort_list);
	INIT_LIST_HEAD(&shost->eh_cmd_q);
	INIT_LIST_HEAD(&shost->starved_list);
	init_waitqueue_head(&shost->host_wait);
	mutex_init(&shost->scan_mutex);

	index = ida_alloc(&host_index_ida, GFP_KERNEL);
	if (index < 0) {
		kfree(shost);
		return NULL;
	}
	shost->host_no = index;

	shost->dma_channel = 0xff;

	/* These three are default values which can be overridden */
	shost->max_channel = 0;
	shost->max_id = 8;
	shost->max_lun = 8;

	/* Give each shost a default transportt */
	shost->transportt = &blank_transport_template;

	/*
	 * All drivers right now should be able to handle 12 byte
	 * commands.  Every so often there are requests for 16 byte
	 * commands, but individual low-level drivers need to certify that
	 * they actually do something sensible with such commands.
	 */
	shost->max_cmd_len = 12;
	shost->hostt = sht;
	shost->this_id = sht->this_id;
	shost->can_queue = sht->can_queue;
	shost->sg_tablesize = sht->sg_tablesize;
	shost->sg_prot_tablesize = sht->sg_prot_tablesize;
	shost->cmd_per_lun = sht->cmd_per_lun;
	shost->no_write_same = sht->no_write_same;
	shost->host_tagset = sht->host_tagset;

	if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
		shost->eh_deadline = -1;
	else if ((ulong) shost_eh_deadline * HZ > INT_MAX) {
		shost_printk(KERN_WARNING, shost,
			     "eh_deadline %u too large, setting to %u\n",
			     shost_eh_deadline, INT_MAX / HZ);
		shost->eh_deadline = INT_MAX;
	} else
		shost->eh_deadline = shost_eh_deadline * HZ;

	if (sht->supported_mode == MODE_UNKNOWN)
		/* means we didn't set it ... default to INITIATOR */
		shost->active_mode = MODE_INITIATOR;
	else
		shost->active_mode = sht->supported_mode;

	if (sht->max_host_blocked)
		shost->max_host_blocked = sht->max_host_blocked;
	else
		shost->max_host_blocked = SCSI_DEFAULT_HOST_BLOCKED;

	/*
	 * If the driver imposes no hard sector transfer limit, start at
	 * machine infinity initially.
	 */
	if (sht->max_sectors)
		shost->max_sectors = sht->max_sectors;
	else
		shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;

	if (sht->max_segment_size)
		shost->max_segment_size = sht->max_segment_size;
	else
		shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;

	/*
	 * assume a 4GB boundary, if not set
	 */
	if (sht->dma_boundary)
		shost->dma_boundary = sht->dma_boundary;
	else
		shost->dma_boundary = 0xffffffff;

	if (sht->virt_boundary_mask)
		shost->virt_boundary_mask = sht->virt_boundary_mask;

	device_initialize(&shost->shost_gendev);
	dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
	shost->shost_gendev.bus = &scsi_bus_type;
	shost->shost_gendev.type = &scsi_host_type;
	scsi_enable_async_suspend(&shost->shost_gendev);

	device_initialize(&shost->shost_dev);
	shost->shost_dev.parent = &shost->shost_gendev;
	shost->shost_dev.class = &shost_class;
	dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
	shost->shost_dev.groups = sht->shost_groups;

	shost->ehandler = kthread_run(scsi_error_handler, shost,
			"scsi_eh_%d", shost->host_no);
	if (IS_ERR(shost->ehandler)) {
		shost_printk(KERN_WARNING, shost,
			"error handler thread failed to spawn, error = %ld\n",
			PTR_ERR(shost->ehandler));
		shost->ehandler = NULL;
		goto fail;
	}

	shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
					WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
					1, shost->host_no);
	if (!shost->tmf_work_q) {
		shost_printk(KERN_WARNING, shost,
			     "failed to create tmf workq\n");
		goto fail;
	}
	scsi_proc_hostdir_add(shost->hostt);
	return shost;
 fail:
	/*
	 * Host state is still SHOST_CREATED and that is enough to release
	 * ->shost_gendev. scsi_host_dev_release() will free
	 * dev_name(&shost->shost_dev).
	 */
	put_device(&shost->shost_gendev);

	return NULL;
}
EXPORT_SYMBOL(scsi_host_alloc);
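
/*
 * Illustrative allocation sketch (not part of this file): the @privsize bytes
 * requested here are reachable through shost_priv(). The template
 * "my_template" and the private struct "my_hba" are hypothetical.
 *
 *	struct Scsi_Host *shost;
 *	struct my_hba *hba;
 *
 *	shost = scsi_host_alloc(&my_template, sizeof(*hba));
 *	if (!shost)
 *		return -ENOMEM;
 *	hba = shost_priv(shost);
 */
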
static int __scsi_host_match(struct device *dev, const void *data)
{
	struct Scsi_Host *p;
	const unsigned short *hostnum = data;

	p = class_to_shost(dev);
	return p->host_no == *hostnum;
}

/**
 * scsi_host_lookup - get a reference to a Scsi_Host by host no
 * @hostnum:	host number to locate
 *
 * Return value:
 *	A pointer to located Scsi_Host or NULL.
 *
 *	The caller must do a scsi_host_put() to drop the reference
 *	that scsi_host_get() took. The put_device() below dropped
 *	the reference from class_find_device().
 **/
struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
{
	struct device *cdev;
	struct Scsi_Host *shost = NULL;

	cdev = class_find_device(&shost_class, NULL, &hostnum,
				 __scsi_host_match);
	if (cdev) {
		shost = scsi_host_get(class_to_shost(cdev));
		put_device(cdev);
	}
	return shost;
}
EXPORT_SYMBOL(scsi_host_lookup);
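
/*
 * Illustrative lookup sketch (not part of this file): callers own the
 * reference returned by scsi_host_lookup() and must drop it with
 * scsi_host_put(). Host number 0 is only an example.
 *
 *	struct Scsi_Host *shost = scsi_host_lookup(0);
 *
 *	if (shost) {
 *		...
 *		scsi_host_put(shost);
 *	}
 */
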
/**
 * scsi_host_get - inc a Scsi_Host ref count
 * @shost:	Pointer to Scsi_Host to inc.
 **/
struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
{
	if ((shost->shost_state == SHOST_DEL) ||
		!get_device(&shost->shost_gendev))
		return NULL;
	return shost;
}
EXPORT_SYMBOL(scsi_host_get);

static bool scsi_host_check_in_flight(struct request *rq, void *data)
{
	int *count = data;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
		(*count)++;

	return true;
}

/**
 * scsi_host_busy - Return the host busy counter
 * @shost:	Pointer to Scsi_Host.
 **/
int scsi_host_busy(struct Scsi_Host *shost)
{
	int cnt = 0;

	blk_mq_tagset_busy_iter(&shost->tag_set,
				scsi_host_check_in_flight, &cnt);
	return cnt;
}
EXPORT_SYMBOL(scsi_host_busy);

/**
 * scsi_host_put - dec a Scsi_Host ref count
 * @shost:	Pointer to Scsi_Host to dec.
 **/
void scsi_host_put(struct Scsi_Host *shost)
{
	put_device(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_host_put);

int scsi_init_hosts(void)
{
	return class_register(&shost_class);
}

void scsi_exit_hosts(void)
{
	class_unregister(&shost_class);
	ida_destroy(&host_index_ida);
}

int scsi_is_host_device(const struct device *dev)
{
	return dev->type == &scsi_host_type;
}
EXPORT_SYMBOL(scsi_is_host_device);

/**
 * scsi_queue_work - Queue work to the Scsi_Host workqueue.
 * @shost:	Pointer to Scsi_Host.
 * @work:	Work to queue for execution.
 *
 * Return value:
 * 	1 - work queued for execution
 *	0 - work is already queued
 *	-EINVAL - work queue doesn't exist
 **/
int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{
	if (unlikely(!shost->work_q)) {
		shost_printk(KERN_ERR, shost,
			"ERROR: Scsi host '%s' attempted to queue scsi-work, "
			"when no workqueue created.\n", shost->hostt->name);
		dump_stack();

		return -EINVAL;
	}

	return queue_work(shost->work_q, work);
}
EXPORT_SYMBOL_GPL(scsi_queue_work);
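
/*
 * Illustrative queueing sketch (not part of this file): the per-host
 * workqueue only exists when the transport template sets create_work_queue,
 * as handled in scsi_add_host_with_dma() above. "my_work" and "my_work_fn"
 * are hypothetical.
 *
 *	INIT_WORK(&my_work, my_work_fn);
 *	if (scsi_queue_work(shost, &my_work) < 0)
 *		...	// no workqueue was created for this host
 */
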
/**
 * scsi_flush_work - Flush a Scsi_Host's workqueue.
 * @shost:	Pointer to Scsi_Host.
 **/
void scsi_flush_work(struct Scsi_Host *shost)
{
	if (!shost->work_q) {
		shost_printk(KERN_ERR, shost,
			"ERROR: Scsi host '%s' attempted to flush scsi-work, "
			"when no workqueue created.\n", shost->hostt->name);
		dump_stack();
		return;
	}

	flush_workqueue(shost->work_q);
}
EXPORT_SYMBOL_GPL(scsi_flush_work);

static bool complete_all_cmds_iter(struct request *rq, void *data)
{
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	enum scsi_host_status status = *(enum scsi_host_status *)data;

	scsi_dma_unmap(scmd);
	scmd->result = 0;
	set_host_byte(scmd, status);
	scsi_done(scmd);
	return true;
}

/**
 * scsi_host_complete_all_commands - Terminate all running commands
 * @shost:	Scsi Host on which commands should be terminated
 * @status:	Status to be set for the terminated commands
 *
 * There is no protection against modification of the number
 * of outstanding commands. It is the responsibility of the
 * caller to ensure that concurrent I/O submission and/or
 * completion is stopped when calling this function.
 */
void scsi_host_complete_all_commands(struct Scsi_Host *shost,
				     enum scsi_host_status status)
{
	blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter,
				&status);
}
EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);
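
/*
 * Illustrative sketch (not part of this file): a driver that has stopped the
 * adapter and quiesced I/O submission might fail back everything still in
 * flight, for example with DID_NO_CONNECT as the host byte:
 *
 *	scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
 */
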
struct scsi_host_busy_iter_data {
	bool (*fn)(struct scsi_cmnd *, void *);
	void *priv;
};

static bool __scsi_host_busy_iter_fn(struct request *req, void *priv)
{
	struct scsi_host_busy_iter_data *iter_data = priv;
	struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req);

	return iter_data->fn(sc, iter_data->priv);
}

/**
 * scsi_host_busy_iter - Iterate over all busy commands
 * @shost:	Pointer to Scsi_Host.
 * @fn:		Function to call on each busy command
 * @priv:	Data pointer passed to @fn
 *
 * If locking against concurrent command completions is required
 * it has to be provided by the caller
 **/
void scsi_host_busy_iter(struct Scsi_Host *shost,
			 bool (*fn)(struct scsi_cmnd *, void *),
			 void *priv)
{
	struct scsi_host_busy_iter_data iter_data = {
		.fn = fn,
		.priv = priv,
	};

	blk_mq_tagset_busy_iter(&shost->tag_set, __scsi_host_busy_iter_fn,
				&iter_data);
}
EXPORT_SYMBOL_GPL(scsi_host_busy_iter);
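
/*
 * Illustrative iteration sketch (not part of this file): a callback matching
 * the @fn signature that counts busy commands, in the spirit of
 * scsi_host_check_in_flight() above. Returning true continues the walk.
 * "count_busy_cmd" is hypothetical.
 *
 *	static bool count_busy_cmd(struct scsi_cmnd *scmd, void *data)
 *	{
 *		unsigned int *busy = data;
 *
 *		(*busy)++;
 *		return true;
 *	}
 *
 *	unsigned int busy = 0;
 *
 *	scsi_host_busy_iter(shost, count_busy_cmd, &busy);
 */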