scsi_transport_srp.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * SCSI RDMA (SRP) transport class
 *
 * Copyright (C) 2007 FUJITA Tomonori <[email protected]>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>
#include "scsi_priv.h"

struct srp_host_attrs {
        atomic_t next_port_id;
};
#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)

#define SRP_HOST_ATTRS 0
#define SRP_RPORT_ATTRS 8

struct srp_internal {
        struct scsi_transport_template t;
        struct srp_function_template *f;

        struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];

        struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
        struct transport_container rport_attr_cont;
};

static int scsi_is_srp_rport(const struct device *dev);

#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)

#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)

static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
{
        return dev_to_shost(r->dev.parent);
}

static int find_child_rport(struct device *dev, void *data)
{
        struct device **child = data;

        if (scsi_is_srp_rport(dev)) {
                WARN_ON_ONCE(*child);
                *child = dev;
        }
        return 0;
}

static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
{
        struct device *child = NULL;

        WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
                                           find_child_rport) < 0);
        return child ? dev_to_rport(child) : NULL;
}

/**
 * srp_tmo_valid() - check timeout combination validity
 * @reconnect_delay: Reconnect delay in seconds.
 * @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
 * @dev_loss_tmo: Device loss timeout in seconds.
 *
 * The combination of the timeout parameters must be such that SCSI commands
 * are finished in a reasonable time. Hence do not allow the fast I/O fail
 * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to
 * exceed that limit if failing I/O fast has been disabled. Furthermore, these
 * parameters must be such that multipath can detect failed paths timely.
 * Hence do not allow all three parameters to be disabled simultaneously.
 */
int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, long dev_loss_tmo)
{
        if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
                return -EINVAL;
        if (reconnect_delay == 0)
                return -EINVAL;
        if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                return -EINVAL;
        if (fast_io_fail_tmo < 0 &&
            dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                return -EINVAL;
        if (dev_loss_tmo >= LONG_MAX / HZ)
                return -EINVAL;
        if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
            fast_io_fail_tmo >= dev_loss_tmo)
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL_GPL(srp_tmo_valid);
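
/*
 * Example (illustrative sketch, not part of this file): an SRP initiator
 * driver would typically validate user-supplied timeouts with
 * srp_tmo_valid() before applying them, e.g. in a module parameter or
 * sysfs store handler.  The variable names below are placeholders:
 *
 *	int ret = srp_tmo_valid(reconnect_delay, fast_io_fail_tmo,
 *				dev_loss_tmo);
 *	if (ret)
 *		return ret;
 */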

static int srp_host_setup(struct transport_container *tc, struct device *dev,
                          struct device *cdev)
{
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);

        atomic_set(&srp_host->next_port_id, 0);
        return 0;
}

static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
                               NULL, NULL);
static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
                               NULL, NULL, NULL);

static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
                  char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return sprintf(buf, "%16phC\n", rport->port_id);
}

static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);

static const struct {
        u32 value;
        char *name;
} srp_rport_role_names[] = {
        {SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"},
        {SRP_RPORT_ROLE_TARGET, "SRP Target"},
};

static ssize_t
show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
                     char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        int i;
        char *name = NULL;

        for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++)
                if (srp_rport_role_names[i].value == rport->roles) {
                        name = srp_rport_role_names[i].name;
                        break;
                }
        return sprintf(buf, "%s\n", name ? : "unknown");
}

static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);

static ssize_t store_srp_rport_delete(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct srp_internal *i = to_srp_internal(shost->transportt);

        if (i->f->rport_delete) {
                i->f->rport_delete(rport);
                return count;
        } else {
                return -ENOSYS;
        }
}

static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);

static ssize_t show_srp_rport_state(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        static const char *const state_name[] = {
                [SRP_RPORT_RUNNING] = "running",
                [SRP_RPORT_BLOCKED] = "blocked",
                [SRP_RPORT_FAIL_FAST] = "fail-fast",
                [SRP_RPORT_LOST] = "lost",
        };
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        enum srp_rport_state state = rport->state;

        return sprintf(buf, "%s\n",
                       (unsigned)state < ARRAY_SIZE(state_name) ?
                       state_name[state] : "???");
}

static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL);

static ssize_t srp_show_tmo(char *buf, int tmo)
{
        return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
}

int srp_parse_tmo(int *tmo, const char *buf)
{
        int res = 0;

        if (strncmp(buf, "off", 3) != 0)
                res = kstrtoint(buf, 0, tmo);
        else
                *tmo = -1;

        return res;
}
EXPORT_SYMBOL(srp_parse_tmo);
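
/*
 * Example (illustrative only): the sysfs attributes below accept either a
 * number of seconds or the string "off".  srp_parse_tmo() translates "off"
 * into -1, which srp_show_tmo() reports back as "off".  The sysfs paths
 * shown here assume the usual transport-class layout:
 *
 *	echo off > /sys/class/srp_remote_ports/port-<h>:<n>/fast_io_fail_tmo
 *	echo 15  > /sys/class/srp_remote_ports/port-<h>:<n>/fast_io_fail_tmo
 */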

static ssize_t show_reconnect_delay(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return srp_show_tmo(buf, rport->reconnect_delay);
}

static ssize_t store_reconnect_delay(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, const size_t count)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        int res, delay;

        res = srp_parse_tmo(&delay, buf);
        if (res)
                goto out;
        res = srp_tmo_valid(delay, rport->fast_io_fail_tmo,
                            rport->dev_loss_tmo);
        if (res)
                goto out;

        if (rport->reconnect_delay <= 0 && delay > 0 &&
            rport->state != SRP_RPORT_RUNNING) {
                queue_delayed_work(system_long_wq, &rport->reconnect_work,
                                   delay * HZ);
        } else if (delay <= 0) {
                cancel_delayed_work(&rport->reconnect_work);
        }
        rport->reconnect_delay = delay;
        res = count;

out:
        return res;
}

static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay,
                   store_reconnect_delay);

static ssize_t show_failed_reconnects(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return sprintf(buf, "%d\n", rport->failed_reconnects);
}

static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL);

static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev,
                                               struct device_attribute *attr,
                                               char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return srp_show_tmo(buf, rport->fast_io_fail_tmo);
}

static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev,
                                                struct device_attribute *attr,
                                                const char *buf, size_t count)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        int res;
        int fast_io_fail_tmo;

        res = srp_parse_tmo(&fast_io_fail_tmo, buf);
        if (res)
                goto out;
        res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo,
                            rport->dev_loss_tmo);
        if (res)
                goto out;
        rport->fast_io_fail_tmo = fast_io_fail_tmo;
        res = count;

out:
        return res;
}

static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
                   show_srp_rport_fast_io_fail_tmo,
                   store_srp_rport_fast_io_fail_tmo);

static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);

        return srp_show_tmo(buf, rport->dev_loss_tmo);
}

static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t count)
{
        struct srp_rport *rport = transport_class_to_srp_rport(dev);
        int res;
        int dev_loss_tmo;

        res = srp_parse_tmo(&dev_loss_tmo, buf);
        if (res)
                goto out;
        res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo,
                            dev_loss_tmo);
        if (res)
                goto out;
        rport->dev_loss_tmo = dev_loss_tmo;
        res = count;

out:
        return res;
}

static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR,
                   show_srp_rport_dev_loss_tmo,
                   store_srp_rport_dev_loss_tmo);

static int srp_rport_set_state(struct srp_rport *rport,
                               enum srp_rport_state new_state)
{
        enum srp_rport_state old_state = rport->state;

        lockdep_assert_held(&rport->mutex);

        switch (new_state) {
        case SRP_RPORT_RUNNING:
                switch (old_state) {
                case SRP_RPORT_LOST:
                        goto invalid;
                default:
                        break;
                }
                break;
        case SRP_RPORT_BLOCKED:
                switch (old_state) {
                case SRP_RPORT_RUNNING:
                        break;
                default:
                        goto invalid;
                }
                break;
        case SRP_RPORT_FAIL_FAST:
                switch (old_state) {
                case SRP_RPORT_LOST:
                        goto invalid;
                default:
                        break;
                }
                break;
        case SRP_RPORT_LOST:
                break;
        }
        rport->state = new_state;
        return 0;

invalid:
        return -EINVAL;
}
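
/*
 * Summary of the transitions accepted by srp_rport_set_state(), derived
 * from the switch statement above (descriptive comment only):
 *
 *	RUNNING   <- any state except LOST
 *	BLOCKED   <- RUNNING only
 *	FAIL_FAST <- any state except LOST
 *	LOST      <- any state
 *
 * LOST is terminal; every other attempted transition returns -EINVAL.
 */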

/**
 * srp_reconnect_work() - reconnect and schedule a new attempt if necessary
 * @work: Work structure used for scheduling this operation.
 */
static void srp_reconnect_work(struct work_struct *work)
{
        struct srp_rport *rport = container_of(to_delayed_work(work),
                                        struct srp_rport, reconnect_work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        int delay, res;

        res = srp_reconnect_rport(rport);
        if (res != 0) {
                shost_printk(KERN_ERR, shost,
                             "reconnect attempt %d failed (%d)\n",
                             ++rport->failed_reconnects, res);
                delay = rport->reconnect_delay *
                        min(100, max(1, rport->failed_reconnects - 10));
                if (delay > 0)
                        queue_delayed_work(system_long_wq,
                                           &rport->reconnect_work, delay * HZ);
        }
}

/*
 * scsi_target_block() must have been called before this function is
 * called to guarantee that no .queuecommand() calls are in progress.
 */
static void __rport_fail_io_fast(struct srp_rport *rport)
{
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct srp_internal *i;

        lockdep_assert_held(&rport->mutex);

        if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
                return;

        scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);

        /* Involve the LLD if possible to terminate all I/O on the rport. */
        i = to_srp_internal(shost->transportt);
        if (i->f->terminate_rport_io)
                i->f->terminate_rport_io(rport);
}

/**
 * rport_fast_io_fail_timedout() - fast I/O failure timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_fast_io_fail_timedout(struct work_struct *work)
{
        struct srp_rport *rport = container_of(to_delayed_work(work),
                                        struct srp_rport, fast_io_fail_work);
        struct Scsi_Host *shost = rport_to_shost(rport);

        pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n",
                dev_name(&rport->dev), dev_name(&shost->shost_gendev));

        mutex_lock(&rport->mutex);
        if (rport->state == SRP_RPORT_BLOCKED)
                __rport_fail_io_fast(rport);
        mutex_unlock(&rport->mutex);
}

/**
 * rport_dev_loss_timedout() - device loss timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_dev_loss_timedout(struct work_struct *work)
{
        struct srp_rport *rport = container_of(to_delayed_work(work),
                                        struct srp_rport, dev_loss_work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct srp_internal *i = to_srp_internal(shost->transportt);

        pr_info("dev_loss_tmo expired for SRP %s / %s.\n",
                dev_name(&rport->dev), dev_name(&shost->shost_gendev));

        mutex_lock(&rport->mutex);
        WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0);
        scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
        mutex_unlock(&rport->mutex);

        i->f->rport_delete(rport);
}

static void __srp_start_tl_fail_timers(struct srp_rport *rport)
{
        struct Scsi_Host *shost = rport_to_shost(rport);
        int delay, fast_io_fail_tmo, dev_loss_tmo;

        lockdep_assert_held(&rport->mutex);

        delay = rport->reconnect_delay;
        fast_io_fail_tmo = rport->fast_io_fail_tmo;
        dev_loss_tmo = rport->dev_loss_tmo;
        pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev),
                 rport->state);

        if (rport->state == SRP_RPORT_LOST)
                return;
        if (delay > 0)
                queue_delayed_work(system_long_wq, &rport->reconnect_work,
                                   1UL * delay * HZ);
        if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) &&
            srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
                pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
                         rport->state);
                scsi_target_block(&shost->shost_gendev);
                if (fast_io_fail_tmo >= 0)
                        queue_delayed_work(system_long_wq,
                                           &rport->fast_io_fail_work,
                                           1UL * fast_io_fail_tmo * HZ);
                if (dev_loss_tmo >= 0)
                        queue_delayed_work(system_long_wq,
                                           &rport->dev_loss_work,
                                           1UL * dev_loss_tmo * HZ);
        }
}

/**
 * srp_start_tl_fail_timers() - start the transport layer failure timers
 * @rport: SRP target port.
 *
 * Start the transport layer fast I/O failure and device loss timers. Do not
 * modify a timer that was already started.
 */
void srp_start_tl_fail_timers(struct srp_rport *rport)
{
        mutex_lock(&rport->mutex);
        __srp_start_tl_fail_timers(rport);
        mutex_unlock(&rport->mutex);
}
EXPORT_SYMBOL(srp_start_tl_fail_timers);
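
/*
 * Example (illustrative sketch, not part of this file): an SRP initiator
 * driver typically calls srp_start_tl_fail_timers() from the code path that
 * detects a transport-layer failure (e.g. a connection error), so that the
 * blocked/fail-fast/dev-loss sequence starts while the driver reconnects.
 * The names below are placeholders:
 *
 *	static void example_handle_connection_error(struct example_target *t)
 *	{
 *		srp_start_tl_fail_timers(t->rport);
 *	}
 */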

/**
 * srp_reconnect_rport() - reconnect to an SRP target port
 * @rport: SRP target port.
 *
 * Blocks SCSI command queueing before invoking reconnect() such that
 * queuecommand() won't be invoked concurrently with reconnect() from outside
 * the SCSI EH. This is important since a reconnect() implementation may
 * reallocate resources needed by queuecommand().
 *
 * Notes:
 * - This function neither waits until outstanding requests have finished nor
 *   tries to abort these. It is the responsibility of the reconnect()
 *   function to finish outstanding commands before reconnecting to the target
 *   port.
 * - It is the responsibility of the caller to ensure that the resources
 *   reallocated by the reconnect() function won't be used while this function
 *   is in progress. One possible strategy is to invoke this function from
 *   the context of the SCSI EH thread only. Another possible strategy is to
 *   lock the rport mutex inside each SCSI LLD callback that can be invoked by
 *   the SCSI EH (the scsi_host_template.eh_*() functions and also the
 *   scsi_host_template.queuecommand() function).
 */
int srp_reconnect_rport(struct srp_rport *rport)
{
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct srp_internal *i = to_srp_internal(shost->transportt);
        struct scsi_device *sdev;
        int res;

        pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev));

        res = mutex_lock_interruptible(&rport->mutex);
        if (res)
                goto out;
        if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
                /*
                 * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
                 * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
                 * later is ok though, scsi_internal_device_unblock_nowait()
                 * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
                 */
                scsi_target_block(&shost->shost_gendev);
        res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
        pr_debug("%s (state %d): transport.reconnect() returned %d\n",
                 dev_name(&shost->shost_gendev), rport->state, res);
        if (res == 0) {
                cancel_delayed_work(&rport->fast_io_fail_work);
                cancel_delayed_work(&rport->dev_loss_work);

                rport->failed_reconnects = 0;
                srp_rport_set_state(rport, SRP_RPORT_RUNNING);
                scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING);
                /*
                 * If the SCSI error handler has offlined one or more devices,
                 * invoking scsi_target_unblock() won't change the state of
                 * these devices into running so do that explicitly.
                 */
                shost_for_each_device(sdev, shost) {
                        mutex_lock(&sdev->state_mutex);
                        if (sdev->sdev_state == SDEV_OFFLINE)
                                sdev->sdev_state = SDEV_RUNNING;
                        mutex_unlock(&sdev->state_mutex);
                }
        } else if (rport->state == SRP_RPORT_RUNNING) {
                /*
                 * srp_reconnect_rport() has been invoked with fast_io_fail
                 * and dev_loss off. Mark the port as failed and start the TL
                 * failure timers if these had not yet been started.
                 */
                __rport_fail_io_fast(rport);
                __srp_start_tl_fail_timers(rport);
        } else if (rport->state != SRP_RPORT_BLOCKED) {
                scsi_target_unblock(&shost->shost_gendev,
                                    SDEV_TRANSPORT_OFFLINE);
        }
        mutex_unlock(&rport->mutex);

out:
        return res;
}
EXPORT_SYMBOL(srp_reconnect_rport);

/**
 * srp_timed_out() - SRP transport intercept of the SCSI timeout EH
 * @scmd: SCSI command.
 *
 * If a timeout occurs while an rport is in the blocked state, ask the SCSI
 * EH to continue waiting (SCSI_EH_RESET_TIMER). Otherwise let the SCSI core
 * handle the timeout (SCSI_EH_NOT_HANDLED).
 *
 * Note: This function is called from soft-IRQ context and with the request
 * queue lock held.
 */
enum scsi_timeout_action srp_timed_out(struct scsi_cmnd *scmd)
{
        struct scsi_device *sdev = scmd->device;
        struct Scsi_Host *shost = sdev->host;
        struct srp_internal *i = to_srp_internal(shost->transportt);
        struct srp_rport *rport = shost_to_rport(shost);

        pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
        return rport && rport->fast_io_fail_tmo < 0 &&
                rport->dev_loss_tmo < 0 &&
                i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
                SCSI_EH_RESET_TIMER : SCSI_EH_NOT_HANDLED;
}
EXPORT_SYMBOL(srp_timed_out);

static void srp_rport_release(struct device *dev)
{
        struct srp_rport *rport = dev_to_rport(dev);

        put_device(dev->parent);
        kfree(rport);
}

static int scsi_is_srp_rport(const struct device *dev)
{
        return dev->release == srp_rport_release;
}

static int srp_rport_match(struct attribute_container *cont,
                           struct device *dev)
{
        struct Scsi_Host *shost;
        struct srp_internal *i;

        if (!scsi_is_srp_rport(dev))
                return 0;

        shost = dev_to_shost(dev->parent);
        if (!shost->transportt)
                return 0;
        if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
                return 0;

        i = to_srp_internal(shost->transportt);
        return &i->rport_attr_cont.ac == cont;
}

static int srp_host_match(struct attribute_container *cont, struct device *dev)
{
        struct Scsi_Host *shost;
        struct srp_internal *i;

        if (!scsi_is_host_device(dev))
                return 0;

        shost = dev_to_shost(dev);
        if (!shost->transportt)
                return 0;
        if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
                return 0;

        i = to_srp_internal(shost->transportt);
        return &i->t.host_attrs.ac == cont;
}

/**
 * srp_rport_get() - increment rport reference count
 * @rport: SRP target port.
 */
void srp_rport_get(struct srp_rport *rport)
{
        get_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_get);

/**
 * srp_rport_put() - decrement rport reference count
 * @rport: SRP target port.
 */
void srp_rport_put(struct srp_rport *rport)
{
        put_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_put);

/**
 * srp_rport_add - add a SRP remote port to the device hierarchy
 * @shost: scsi host the remote port is connected to.
 * @ids: The port id for the remote port.
 *
 * Publishes a port to the rest of the system.
 */
struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
                                struct srp_rport_identifiers *ids)
{
        struct srp_rport *rport;
        struct device *parent = &shost->shost_gendev;
        struct srp_internal *i = to_srp_internal(shost->transportt);
        int id, ret;

        rport = kzalloc(sizeof(*rport), GFP_KERNEL);
        if (!rport)
                return ERR_PTR(-ENOMEM);

        mutex_init(&rport->mutex);

        device_initialize(&rport->dev);

        rport->dev.parent = get_device(parent);
        rport->dev.release = srp_rport_release;

        memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
        rport->roles = ids->roles;

        if (i->f->reconnect)
                rport->reconnect_delay = i->f->reconnect_delay ?
                        *i->f->reconnect_delay : 10;
        INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work);
        rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ?
                *i->f->fast_io_fail_tmo : 15;
        rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60;
        INIT_DELAYED_WORK(&rport->fast_io_fail_work,
                          rport_fast_io_fail_timedout);
        INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);

        id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
        dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);

        transport_setup_device(&rport->dev);

        ret = device_add(&rport->dev);
        if (ret) {
                transport_destroy_device(&rport->dev);
                put_device(&rport->dev);
                return ERR_PTR(ret);
        }

        transport_add_device(&rport->dev);
        transport_configure_device(&rport->dev);

        return rport;
}
EXPORT_SYMBOL_GPL(srp_rport_add);
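
/*
 * Example (illustrative sketch, not part of this file): after a successful
 * login an SRP initiator driver typically publishes the remote port like
 * this; the identifier variables are placeholders:
 *
 *	struct srp_rport_identifiers ids;
 *	struct srp_rport *rport;
 *
 *	memcpy(ids.port_id, target_port_id, sizeof(ids.port_id));
 *	ids.roles = SRP_RPORT_ROLE_TARGET;
 *	rport = srp_rport_add(shost, &ids);
 *	if (IS_ERR(rport))
 *		return PTR_ERR(rport);
 */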

/**
 * srp_rport_del - remove a SRP remote port
 * @rport: SRP remote port to remove
 *
 * Removes the specified SRP remote port.
 */
void srp_rport_del(struct srp_rport *rport)
{
        struct device *dev = &rport->dev;

        transport_remove_device(dev);
        device_del(dev);
        transport_destroy_device(dev);

        put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);

static int do_srp_rport_del(struct device *dev, void *data)
{
        if (scsi_is_srp_rport(dev))
                srp_rport_del(dev_to_rport(dev));
        return 0;
}

/**
 * srp_remove_host - tear down a Scsi_Host's SRP data structures
 * @shost: Scsi Host that is torn down
 *
 * Removes all SRP remote ports for a given Scsi_Host.
 * Must be called just before scsi_remove_host for SRP HBAs.
 */
void srp_remove_host(struct Scsi_Host *shost)
{
        device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del);
}
EXPORT_SYMBOL_GPL(srp_remove_host);

/**
 * srp_stop_rport_timers - stop the transport layer recovery timers
 * @rport: SRP remote port for which to stop the timers.
 *
 * Must be called after srp_remove_host() and scsi_remove_host(). The caller
 * must hold a reference on the rport (rport->dev) and on the SCSI host
 * (rport->dev.parent).
 */
void srp_stop_rport_timers(struct srp_rport *rport)
{
        mutex_lock(&rport->mutex);
        if (rport->state == SRP_RPORT_BLOCKED)
                __rport_fail_io_fast(rport);
        srp_rport_set_state(rport, SRP_RPORT_LOST);
        mutex_unlock(&rport->mutex);

        cancel_delayed_work_sync(&rport->reconnect_work);
        cancel_delayed_work_sync(&rport->fast_io_fail_work);
        cancel_delayed_work_sync(&rport->dev_loss_work);
}
EXPORT_SYMBOL_GPL(srp_stop_rport_timers);

/**
 * srp_attach_transport - instantiate SRP transport template
 * @ft: SRP transport class function template
 */
struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *ft)
{
        int count;
        struct srp_internal *i;

        i = kzalloc(sizeof(*i), GFP_KERNEL);
        if (!i)
                return NULL;

        i->t.host_size = sizeof(struct srp_host_attrs);
        i->t.host_attrs.ac.attrs = &i->host_attrs[0];
        i->t.host_attrs.ac.class = &srp_host_class.class;
        i->t.host_attrs.ac.match = srp_host_match;
        i->host_attrs[0] = NULL;
        transport_container_register(&i->t.host_attrs);

        i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
        i->rport_attr_cont.ac.class = &srp_rport_class.class;
        i->rport_attr_cont.ac.match = srp_rport_match;

        count = 0;
        i->rport_attrs[count++] = &dev_attr_port_id;
        i->rport_attrs[count++] = &dev_attr_roles;
        if (ft->has_rport_state) {
                i->rport_attrs[count++] = &dev_attr_state;
                i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo;
                i->rport_attrs[count++] = &dev_attr_dev_loss_tmo;
        }
        if (ft->reconnect) {
                i->rport_attrs[count++] = &dev_attr_reconnect_delay;
                i->rport_attrs[count++] = &dev_attr_failed_reconnects;
        }
        if (ft->rport_delete)
                i->rport_attrs[count++] = &dev_attr_delete;
        i->rport_attrs[count++] = NULL;
        BUG_ON(count > ARRAY_SIZE(i->rport_attrs));

        transport_container_register(&i->rport_attr_cont);

        i->f = ft;

        return &i->t;
}
EXPORT_SYMBOL_GPL(srp_attach_transport);
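
/*
 * Example (illustrative sketch, not part of this file): an SRP initiator
 * driver instantiates the transport template once at module load time and
 * points its SCSI host at the returned scsi_transport_template.  The names
 * and field values below are placeholders:
 *
 *	static struct srp_function_template example_srp_ft = {
 *		.has_rport_state	= true,
 *		.reset_timer_if_blocked	= true,
 *		.reconnect		= example_rport_reconnect,
 *		.terminate_rport_io	= example_terminate_io,
 *		.rport_delete		= example_rport_delete,
 *	};
 *
 *	example_transport = srp_attach_transport(&example_srp_ft);
 *	if (!example_transport)
 *		return -ENOMEM;
 */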

/**
 * srp_release_transport - release SRP transport template instance
 * @t: transport template instance
 */
void srp_release_transport(struct scsi_transport_template *t)
{
        struct srp_internal *i = to_srp_internal(t);

        transport_container_unregister(&i->t.host_attrs);
        transport_container_unregister(&i->rport_attr_cont);

        kfree(i);
}
EXPORT_SYMBOL_GPL(srp_release_transport);

static __init int srp_transport_init(void)
{
        int ret;

        ret = transport_class_register(&srp_host_class);
        if (ret)
                return ret;
        ret = transport_class_register(&srp_rport_class);
        if (ret)
                goto unregister_host_class;

        return 0;

unregister_host_class:
        transport_class_unregister(&srp_host_class);
        return ret;
}

static void __exit srp_transport_exit(void)
{
        transport_class_unregister(&srp_host_class);
        transport_class_unregister(&srp_rport_class);
}

MODULE_AUTHOR("FUJITA Tomonori");
MODULE_DESCRIPTION("SRP Transport Attributes");
MODULE_LICENSE("GPL");

module_init(srp_transport_init);
module_exit(srp_transport_exit);