// SPDX-License-Identifier: GPL-2.0-only
/*
 * scsi.c Copyright (C) 1992 Drew Eckhardt
 *        Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *        Copyright (C) 2002, 2003 Christoph Hellwig
 *
 * generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 * <[email protected]>
 *
 * Bug correction thanks go to :
 *      Rik Faith <[email protected]>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <[email protected]>
 *
 * Modified by Eric Youngdale [email protected] or [email protected] to
 * add scatter-gather, multiple outstanding request, and other
 * enhancements.
 *
 * Native multichannel, wide scsi, /proc/scsi and hot plugging
 * support added by Michael Neuffer <[email protected]>
 *
 * Added request_module("scsi_hostadapter") for kerneld:
 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 * Bjorn Ekwall <[email protected]>
 * (changed to kmod)
 *
 * Major improvements to the timeout, abort, and reset processing,
 * as well as performance modifications for large queue depths by
 * Leonard N. Zubkoff <[email protected]>
 *
 * Converted cli() code to spinlocks, Ingo Molnar
 *
 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 * out_of_space hacks, D. Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scsi.h>

/*
 * Definitions and constants.
 */

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif

#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
        unsigned int level;

        /*
         * If ML QUEUE log level is greater than or equal to:
         *
         * 1: nothing (match completion)
         *
         * 2: log opcode + command of all commands + cmd address
         *
         * 3: same as 2
         *
         * 4: same as 3
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
                                       SCSI_LOG_MLQUEUE_BITS);
                if (level > 1) {
                        scmd_printk(KERN_INFO, cmd,
                                    "Send: scmd 0x%p\n", cmd);
                        scsi_print_command(cmd);
                }
        }
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
        unsigned int level;

        /*
         * If ML COMPLETE log level is greater than or equal to:
         *
         * 1: log disposition, result, opcode + command, and conditionally
         * sense data for failures or non SUCCESS dispositions.
         *
         * 2: same as 1 but for all command completions.
         *
         * 3: same as 2
         *
         * 4: same as 3 plus dump extra junk
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
                                       SCSI_LOG_MLCOMPLETE_BITS);
                if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
                    (level > 1)) {
                        scsi_print_result(cmd, "Done", disposition);
                        scsi_print_command(cmd);
                        if (scsi_status_is_check_condition(cmd->result))
                                scsi_print_sense(cmd);
                        if (level > 3)
                                scmd_printk(KERN_INFO, cmd,
                                            "scsi host busy %d failed %d\n",
                                            scsi_host_busy(cmd->device->host),
                                            cmd->device->host->host_failed);
                }
        }
}
#endif
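
/*
 * Example (sketch): scsi_logging_level is a bit mask of independent per-class
 * fields, and SCSI_LOG_LEVEL() above extracts one of them. Assuming the field
 * layout from scsi_logging.h, mid-layer queue logging could be raised to
 * level 2 with something like:
 *
 *      scsi_logging_level |= 2 << SCSI_LOG_MLQUEUE_SHIFT;
 *
 * The same numeric value can also be written to the scsi_logging_level
 * module parameter from user space.
 */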

/**
 * scsi_finish_command - cleanup and pass command back to upper layer
 * @cmd: the command
 *
 * Description: Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct scsi_target *starget = scsi_target(sdev);
        struct Scsi_Host *shost = sdev->host;
        struct scsi_driver *drv;
        unsigned int good_bytes;

        scsi_device_unbusy(sdev, cmd);

        /*
         * Clear the flags that say that the device/target/host is no longer
         * capable of accepting new commands.
         */
        if (atomic_read(&shost->host_blocked))
                atomic_set(&shost->host_blocked, 0);
        if (atomic_read(&starget->target_blocked))
                atomic_set(&starget->target_blocked, 0);
        if (atomic_read(&sdev->device_blocked))
                atomic_set(&sdev->device_blocked, 0);

        SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
                                "Notifying upper driver of completion "
                                "(result %x)\n", cmd->result));

        good_bytes = scsi_bufflen(cmd);
        if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
                int old_good_bytes = good_bytes;

                drv = scsi_cmd_to_driver(cmd);
                if (drv->done)
                        good_bytes = drv->done(cmd);
                /*
                 * USB may not give sense identifying bad sector and
                 * simply return a residue instead, so subtract off the
                 * residue if drv->done() error processing indicates no
                 * change to the completion length.
                 */
                if (good_bytes == old_good_bytes)
                        good_bytes -= scsi_get_resid(cmd);
        }
        scsi_io_completion(cmd, good_bytes);
}

/*
 * 4096 is big enough for saturating fast SCSI LUNs.
 */
int scsi_device_max_queue_depth(struct scsi_device *sdev)
{
        return min_t(int, sdev->host->can_queue, 4096);
}

/**
 * scsi_change_queue_depth - change a device's queue depth
 * @sdev: SCSI Device in question
 * @depth: number of commands allowed to be queued to the driver
 *
 * Sets the device queue depth and returns the new value.
 */
int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
        depth = min_t(int, depth, scsi_device_max_queue_depth(sdev));

        if (depth > 0) {
                sdev->queue_depth = depth;
                wmb();
        }

        if (sdev->request_queue)
                blk_set_queue_depth(sdev->request_queue, depth);

        sbitmap_resize(&sdev->budget_map, sdev->queue_depth);

        return sdev->queue_depth;
}
EXPORT_SYMBOL(scsi_change_queue_depth);
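
/*
 * Example (sketch): a low-level driver would typically call
 * scsi_change_queue_depth() from its ->change_queue_depth() or device
 * configuration hook once it knows how many commands the hardware can
 * accept per LUN. The hook name and limit below are illustrative only:
 *
 *      static int example_change_queue_depth(struct scsi_device *sdev,
 *                                            int depth)
 *      {
 *              if (depth > 64)
 *                      depth = 64;
 *              return scsi_change_queue_depth(sdev, depth);
 *      }
 */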

/**
 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
 * @sdev: SCSI Device in question
 * @depth: Current number of outstanding SCSI commands on this device,
 *         not counting the one returned as QUEUE_FULL.
 *
 * Description: This function will track successive QUEUE_FULL events on a
 *              specific SCSI device to determine if and when there is a
 *              need to adjust the queue depth on the device.
 *
 * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth,
 *          -1 - Drop back to untagged operation using host->cmd_per_lun
 *               as the untagged command depth
 *
 * Lock Status: None held on entry
 *
 * Notes: Low level drivers may call this at any time and we will do
 *        "The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
        /*
         * Don't let QUEUE_FULLs on the same
         * jiffies count, they could all be from
         * same event.
         */
        if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
                return 0;

        sdev->last_queue_full_time = jiffies;
        if (sdev->last_queue_full_depth != depth) {
                sdev->last_queue_full_count = 1;
                sdev->last_queue_full_depth = depth;
        } else {
                sdev->last_queue_full_count++;
        }

        if (sdev->last_queue_full_count <= 10)
                return 0;

        return scsi_change_queue_depth(sdev, depth);
}
EXPORT_SYMBOL(scsi_track_queue_full);
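
/*
 * Example (sketch): an LLD completion path that sees a QUEUE_FULL
 * (SAM_STAT_TASK_SET_FULL) status could hand the current number of
 * outstanding commands to scsi_track_queue_full() and let it decide
 * whether to lower the queue depth. The variable names are illustrative:
 *
 *      if (status == SAM_STAT_TASK_SET_FULL)
 *              scsi_track_queue_full(sdev, outstanding_cmds - 1);
 */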

/**
 * scsi_vpd_inquiry - Request a device provide us with a VPD page
 * @sdev: The device to ask
 * @buffer: Where to put the result
 * @page: Which Vital Product Data to return
 * @len: The length of the buffer
 *
 * This is an internal helper function.  You probably want to use
 * scsi_get_vpd_page instead.
 *
 * Returns size of the vpd page on success or a negative error number.
 */
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
                            u8 page, unsigned len)
{
        int result;
        unsigned char cmd[16];

        if (len < 4)
                return -EINVAL;

        cmd[0] = INQUIRY;
        cmd[1] = 1;             /* EVPD */
        cmd[2] = page;
        cmd[3] = len >> 8;
        cmd[4] = len & 0xff;
        cmd[5] = 0;             /* Control byte */

        /*
         * I'm not convinced we need to try quite this hard to get VPD, but
         * all the existing users tried this hard.
         */
        result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len,
                                  30 * HZ, 3, NULL);
        if (result)
                return -EIO;

        /*
         * Sanity check that we got the page back that we asked for and that
         * the page size is not 0.
         */
        if (buffer[1] != page)
                return -EIO;

        result = get_unaligned_be16(&buffer[2]);
        if (!result)
                return -EIO;

        return result + 4;
}

static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
{
        unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
        int result;

        if (sdev->no_vpd_size)
                return SCSI_DEFAULT_VPD_LEN;

        /*
         * Fetch the VPD page header to find out how big the page
         * is. This is done to prevent problems on legacy devices
         * which can not handle allocation lengths as large as
         * potentially requested by the caller.
         */
        result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
        if (result < 0)
                return 0;

        if (result < SCSI_VPD_HEADER_SIZE) {
                dev_warn_once(&sdev->sdev_gendev,
                              "%s: short VPD page 0x%02x length: %d bytes\n",
                              __func__, page, result);
                return 0;
        }

        return result;
}

/**
 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
 * @sdev: The device to ask
 * @page: Which Vital Product Data to return
 * @buf: where to store the VPD
 * @buf_len: number of bytes in the VPD buffer area
 *
 * SCSI devices may optionally supply Vital Product Data.  Each 'page'
 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
 * If the device supports this VPD page, this routine fills @buf
 * with the data from that page and returns 0. If the VPD page is not
 * supported or its content cannot be retrieved, -EINVAL is returned.
 */
int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
                      int buf_len)
{
        int result, vpd_len;

        if (!scsi_device_supports_vpd(sdev))
                return -EINVAL;

        vpd_len = scsi_get_vpd_size(sdev, page);
        if (vpd_len <= 0)
                return -EINVAL;

        vpd_len = min(vpd_len, buf_len);

        /*
         * Fetch the actual page. Since the appropriate size was reported
         * by the device it is now safe to ask for something bigger.
         */
        memset(buf, 0, buf_len);
        result = scsi_vpd_inquiry(sdev, buf, page, vpd_len);
        if (result < 0)
                return -EINVAL;
        else if (result > vpd_len)
                dev_warn_once(&sdev->sdev_gendev,
                              "%s: VPD page 0x%02x result %d > %d bytes\n",
                              __func__, page, result, vpd_len);

        return 0;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
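
/*
 * Example (sketch): fetching the Block Limits VPD page (0xb0) into a
 * caller-supplied buffer; the MAXIMUM TRANSFER LENGTH field offset is
 * taken from SBC and the buffer size is illustrative:
 *
 *      unsigned char buf[64];
 *
 *      if (!scsi_get_vpd_page(sdev, 0xb0, buf, sizeof(buf)))
 *              pr_info("max transfer length: %u blocks\n",
 *                      get_unaligned_be32(&buf[8]));
 */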

/**
 * scsi_get_vpd_buf - Get Vital Product Data from a SCSI device
 * @sdev: The device to ask
 * @page: Which Vital Product Data to return
 *
 * Returns %NULL upon failure.
 */
static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page)
{
        struct scsi_vpd *vpd_buf;
        int vpd_len, result;

        vpd_len = scsi_get_vpd_size(sdev, page);
        if (vpd_len <= 0)
                return NULL;

retry_pg:
        /*
         * Fetch the actual page. Since the appropriate size was reported
         * by the device it is now safe to ask for something bigger.
         */
        vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL);
        if (!vpd_buf)
                return NULL;

        result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len);
        if (result < 0) {
                kfree(vpd_buf);
                return NULL;
        }
        if (result > vpd_len) {
                dev_warn_once(&sdev->sdev_gendev,
                              "%s: VPD page 0x%02x result %d > %d bytes\n",
                              __func__, page, result, vpd_len);
                vpd_len = result;
                kfree(vpd_buf);
                goto retry_pg;
        }

        vpd_buf->len = result;

        return vpd_buf;
}

static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page,
                                 struct scsi_vpd __rcu **sdev_vpd_buf)
{
        struct scsi_vpd *vpd_buf;

        vpd_buf = scsi_get_vpd_buf(sdev, page);
        if (!vpd_buf)
                return;

        mutex_lock(&sdev->inquiry_mutex);
        vpd_buf = rcu_replace_pointer(*sdev_vpd_buf, vpd_buf,
                                      lockdep_is_held(&sdev->inquiry_mutex));
        mutex_unlock(&sdev->inquiry_mutex);

        if (vpd_buf)
                kfree_rcu(vpd_buf, rcu);
}

/**
 * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
 * @sdev: The device to ask
 *
 * Attach the 'Device Identification' VPD page (0x83) and the
 * 'Unit Serial Number' VPD page (0x80) to a SCSI device
 * structure. This information can be used to identify the device
 * uniquely.
 */
void scsi_attach_vpd(struct scsi_device *sdev)
{
        int i;
        struct scsi_vpd *vpd_buf;

        if (!scsi_device_supports_vpd(sdev))
                return;

        /* Ask for all the pages supported by this device */
        vpd_buf = scsi_get_vpd_buf(sdev, 0);
        if (!vpd_buf)
                return;

        for (i = 4; i < vpd_buf->len; i++) {
                if (vpd_buf->data[i] == 0x0)
                        scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
                if (vpd_buf->data[i] == 0x80)
                        scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
                if (vpd_buf->data[i] == 0x83)
                        scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
                if (vpd_buf->data[i] == 0x89)
                        scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
                if (vpd_buf->data[i] == 0xb0)
                        scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0);
                if (vpd_buf->data[i] == 0xb1)
                        scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
                if (vpd_buf->data[i] == 0xb2)
                        scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
        }
        kfree(vpd_buf);
}
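
/*
 * Example (sketch): once attached, the cached pages are accessed under
 * RCU. A reader interested in the Device Identification page could do:
 *
 *      struct scsi_vpd *vpd;
 *
 *      rcu_read_lock();
 *      vpd = rcu_dereference(sdev->vpd_pg83);
 *      if (vpd)
 *              ... inspect vpd->len bytes at vpd->data ...
 *      rcu_read_unlock();
 */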

/**
 * scsi_report_opcode - Find out if a given command opcode is supported
 * @sdev: scsi device to query
 * @buffer: scratch buffer (must be at least 20 bytes long)
 * @len: length of buffer
 * @opcode: opcode for command to look up
 *
 * Uses the REPORT SUPPORTED OPERATION CODES to look up the given
 * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
 * unsupported and 1 if the device claims to support the command.
 */
int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
                       unsigned int len, unsigned char opcode)
{
        unsigned char cmd[16];
        struct scsi_sense_hdr sshdr;
        int result, request_len;
        const struct scsi_exec_args exec_args = {
                .sshdr = &sshdr,
        };

        if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
                return -EINVAL;

        /* RSOC header + size of command we are asking about */
        request_len = 4 + COMMAND_SIZE(opcode);
        if (request_len > len) {
                dev_warn_once(&sdev->sdev_gendev,
                              "%s: len %u bytes, opcode 0x%02x needs %u\n",
                              __func__, len, opcode, request_len);
                return -EINVAL;
        }

        memset(cmd, 0, 16);
        cmd[0] = MAINTENANCE_IN;
        cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
        cmd[2] = 1;             /* One command format */
        cmd[3] = opcode;
        put_unaligned_be32(request_len, &cmd[6]);
        memset(buffer, 0, len);

        result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer,
                                  request_len, 30 * HZ, 3, &exec_args);
        if (result < 0)
                return result;
        if (result && scsi_sense_valid(&sshdr) &&
            sshdr.sense_key == ILLEGAL_REQUEST &&
            (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
                return -EINVAL;

        if ((buffer[1] & 3) == 3) /* Command supported */
                return 1;

        return 0;
}
EXPORT_SYMBOL(scsi_report_opcode);
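
/*
 * Example (sketch): probing for WRITE SAME (16) support with a small
 * scratch buffer, as an upper-level driver might before issuing the
 * command. The buffer size is illustrative:
 *
 *      unsigned char buf[64];
 *
 *      if (scsi_report_opcode(sdev, buf, sizeof(buf), WRITE_SAME_16) == 1)
 *              ... device claims to support WRITE SAME (16) ...
 */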

/**
 * scsi_device_get - get an additional reference to a scsi_device
 * @sdev: device to get a reference to
 *
 * Description: Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 *
 * This will fail if a device is deleted or cancelled, or when the LLD module
 * is in the process of being unloaded.
 */
int scsi_device_get(struct scsi_device *sdev)
{
        if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
                goto fail;
        if (!get_device(&sdev->sdev_gendev))
                goto fail;
        if (!try_module_get(sdev->host->hostt->module))
                goto fail_put_device;
        return 0;

fail_put_device:
        put_device(&sdev->sdev_gendev);
fail:
        return -ENXIO;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put - release a reference to a scsi_device
 * @sdev: device to release a reference on.
 *
 * Description: Release a reference to the scsi_device and decrements the use
 * count of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
        struct module *mod = sdev->host->hostt->module;

        put_device(&sdev->sdev_gendev);
        module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
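
/*
 * Example (sketch): scsi_device_get()/scsi_device_put() bracket any use
 * of an sdev pointer obtained outside the normal command path:
 *
 *      if (scsi_device_get(sdev))
 *              return -ENXIO;
 *      ... use sdev ...
 *      scsi_device_put(sdev);
 */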

/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
                                           struct scsi_device *prev)
{
        struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
        struct scsi_device *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        while (list->next != &shost->__devices) {
                next = list_entry(list->next, struct scsi_device, siblings);
                /* skip devices that we can't get a reference to */
                if (!scsi_device_get(next))
                        break;
                next = NULL;
                list = list->next;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        if (prev)
                scsi_device_put(prev);
        return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
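
/*
 * Example (sketch): callers normally use the shost_for_each_device()
 * wrapper around the iterator above; it takes and drops the per-device
 * references automatically:
 *
 *      struct scsi_device *sdev;
 *
 *      shost_for_each_device(sdev, shost)
 *              sdev_printk(KERN_INFO, sdev, "LUN %llu\n",
 *                          (unsigned long long)sdev->lun);
 */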

/**
 * starget_for_each_device - helper to walk all devices of a target
 * @starget: target whose devices we want to iterate over.
 * @data: Opaque passed to each function call.
 * @fn: Function to call on each device
 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
 */
void starget_for_each_device(struct scsi_target *starget, void *data,
                             void (*fn)(struct scsi_device *, void *))
{
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost) {
                if ((sdev->channel == starget->channel) &&
                    (sdev->id == starget->id))
                        fn(sdev, data);
        }
}
EXPORT_SYMBOL(starget_for_each_device);

/**
 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
 * @starget: target whose devices we want to iterate over.
 * @data: parameter for callback @fn()
 * @fn: callback function that is invoked for each device
 *
 * This traverses over each device of @starget.  It does _not_
 * take a reference on the scsi_device, so the whole loop must be
 * protected by shost->host_lock.
 *
 * Note: The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use starget_for_each_device instead.
 **/
void __starget_for_each_device(struct scsi_target *starget, void *data,
                               void (*fn)(struct scsi_device *, void *))
{
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        struct scsi_device *sdev;

        __shost_for_each_device(sdev, shost) {
                if ((sdev->channel == starget->channel) &&
                    (sdev->id == starget->id))
                        fn(sdev, data);
        }
}
EXPORT_SYMBOL(__starget_for_each_device);

/**
 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
 * @starget: SCSI target pointer
 * @lun: SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and
 * any access to the returned scsi_device. A scsi_device in state
 * SDEV_DEL is skipped.
 *
 * Note: The only reason why drivers should use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup_by_target instead.
 **/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
                                                   u64 lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
                if (sdev->sdev_state == SDEV_DEL)
                        continue;
                if (sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup_by_target);

/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget: SCSI target pointer
 * @lun: SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
                                                 u64 lun)
{
        struct scsi_device *sdev;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup_by_target(starget, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);

/**
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost: SCSI host pointer
 * @channel: SCSI channel (zero if only one channel)
 * @id: SCSI target number (physical unit number)
 * @lun: SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
 * for a given host. The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and any access
 * to the returned scsi_device.
 *
 * Note: The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
 **/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
                                         uint channel, uint id, u64 lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &shost->__devices, siblings) {
                if (sdev->sdev_state == SDEV_DEL)
                        continue;
                if (sdev->channel == channel && sdev->id == id &&
                    sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);

/**
 * scsi_device_lookup - find a device given the host
 * @shost: SCSI host pointer
 * @channel: SCSI channel (zero if only one channel)
 * @id: SCSI target number (physical unit number)
 * @lun: SCSI Logical Unit Number
 *
 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
 * for a given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
                                       uint channel, uint id, u64 lun)
{
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup(shost, channel, id, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);
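
/*
 * Example (sketch): looking up channel 0, target 1, LUN 0 on a host and
 * dropping the reference when done. The addressing values are
 * illustrative:
 *
 *      struct scsi_device *sdev = scsi_device_lookup(shost, 0, 1, 0);
 *
 *      if (sdev) {
 *              ... use sdev ...
 *              scsi_device_put(sdev);
 *      }
 */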

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

static int __init init_scsi(void)
{
        int error;

        error = scsi_init_procfs();
        if (error)
                goto cleanup_queue;
        error = scsi_init_devinfo();
        if (error)
                goto cleanup_procfs;
        error = scsi_init_hosts();
        if (error)
                goto cleanup_devlist;
        error = scsi_init_sysctl();
        if (error)
                goto cleanup_hosts;
        error = scsi_sysfs_register();
        if (error)
                goto cleanup_sysctl;

        scsi_netlink_init();

        printk(KERN_NOTICE "SCSI subsystem initialized\n");
        return 0;

cleanup_sysctl:
        scsi_exit_sysctl();
cleanup_hosts:
        scsi_exit_hosts();
cleanup_devlist:
        scsi_exit_devinfo();
cleanup_procfs:
        scsi_exit_procfs();
cleanup_queue:
        scsi_exit_queue();
        printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
               -error);

        return error;
}

static void __exit exit_scsi(void)
{
        scsi_netlink_exit();
        scsi_sysfs_unregister();
        scsi_exit_sysctl();
        scsi_exit_hosts();
        scsi_exit_devinfo();
        scsi_exit_procfs();
        scsi_exit_queue();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);