  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * driver for channel subsystem
  4. *
  5. * Copyright IBM Corp. 2002, 2010
  6. *
  7. * Author(s): Arnd Bergmann ([email protected])
  8. * Cornelia Huck ([email protected])
  9. */
  10. #define KMSG_COMPONENT "cio"
  11. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  12. #include <linux/export.h>
  13. #include <linux/init.h>
  14. #include <linux/device.h>
  15. #include <linux/slab.h>
  16. #include <linux/errno.h>
  17. #include <linux/list.h>
  18. #include <linux/reboot.h>
  19. #include <linux/proc_fs.h>
  20. #include <linux/genalloc.h>
  21. #include <linux/dma-mapping.h>
  22. #include <asm/isc.h>
  23. #include <asm/crw.h>
  24. #include "css.h"
  25. #include "cio.h"
  26. #include "blacklist.h"
  27. #include "cio_debug.h"
  28. #include "ioasm.h"
  29. #include "chsc.h"
  30. #include "device.h"
  31. #include "idset.h"
  32. #include "chp.h"
  33. int css_init_done = 0;
  34. int max_ssid;
  35. #define MAX_CSS_IDX 0
  36. struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
  37. static struct bus_type css_bus_type;
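/*
 * Call fn() for every possible subchannel id in every enabled subchannel
 * set (0..max_ssid).  A non-zero return value from fn() ends the walk of
 * the current subchannel set; the value returned by the last invocation
 * of fn() is passed back to the caller.
 */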
  38. int
  39. for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
  40. {
  41. struct subchannel_id schid;
  42. int ret;
  43. init_subchannel_id(&schid);
  44. do {
  45. do {
  46. ret = fn(schid, data);
  47. if (ret)
  48. break;
  49. } while (schid.sch_no++ < __MAX_SUBCHANNEL);
  50. schid.sch_no = 0;
  51. } while (schid.ssid++ < max_ssid);
  52. return ret;
  53. }
  54. struct cb_data {
  55. void *data;
  56. struct idset *set;
  57. int (*fn_known_sch)(struct subchannel *, void *);
  58. int (*fn_unknown_sch)(struct subchannel_id, void *);
  59. };
  60. static int call_fn_known_sch(struct device *dev, void *data)
  61. {
  62. struct subchannel *sch = to_subchannel(dev);
  63. struct cb_data *cb = data;
  64. int rc = 0;
  65. if (cb->set)
  66. idset_sch_del(cb->set, sch->schid);
  67. if (cb->fn_known_sch)
  68. rc = cb->fn_known_sch(sch, cb->data);
  69. return rc;
  70. }
  71. static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
  72. {
  73. struct cb_data *cb = data;
  74. int rc = 0;
  75. if (idset_sch_contains(cb->set, schid))
  76. rc = cb->fn_unknown_sch(schid, cb->data);
  77. return rc;
  78. }
  79. static int call_fn_all_sch(struct subchannel_id schid, void *data)
  80. {
  81. struct cb_data *cb = data;
  82. struct subchannel *sch;
  83. int rc = 0;
  84. sch = get_subchannel_by_schid(schid);
  85. if (sch) {
  86. if (cb->fn_known_sch)
  87. rc = cb->fn_known_sch(sch, cb->data);
  88. put_device(&sch->dev);
  89. } else {
  90. if (cb->fn_unknown_sch)
  91. rc = cb->fn_unknown_sch(schid, cb->data);
  92. }
  93. return rc;
  94. }
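/*
 * Two-stage walk over subchannels: fn_known is called for every
 * subchannel already registered with the css bus, fn_unknown for every
 * remaining subchannel id.  An idset, initially containing every id,
 * tracks which subchannels were handled in the first stage; if it cannot
 * be allocated, the code falls back to a brute-force scan of all ids.
 * If only fn_known is given, the idset is skipped entirely.
 *
 * Typical use (callback names are illustrative only):
 *	rc = for_each_subchannel_staged(eval_known_cb, eval_unknown_cb, NULL);
 */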
  95. int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
  96. int (*fn_unknown)(struct subchannel_id,
  97. void *), void *data)
  98. {
  99. struct cb_data cb;
  100. int rc;
  101. cb.data = data;
  102. cb.fn_known_sch = fn_known;
  103. cb.fn_unknown_sch = fn_unknown;
  104. if (fn_known && !fn_unknown) {
  105. /* Skip idset allocation in case of known-only loop. */
  106. cb.set = NULL;
  107. return bus_for_each_dev(&css_bus_type, NULL, &cb,
  108. call_fn_known_sch);
  109. }
  110. cb.set = idset_sch_new();
  111. if (!cb.set)
  112. /* fall back to brute force scanning in case of oom */
  113. return for_each_subchannel(call_fn_all_sch, &cb);
  114. idset_fill(cb.set);
  115. /* Process registered subchannels. */
  116. rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
  117. if (rc)
  118. goto out;
  119. /* Process unregistered subchannels. */
  120. if (fn_unknown)
  121. rc = for_each_subchannel(call_fn_unknown_sch, &cb);
  122. out:
  123. idset_free(cb.set);
  124. return rc;
  125. }
  126. static void css_sch_todo(struct work_struct *work);
  127. static int css_sch_create_locks(struct subchannel *sch)
  128. {
  129. sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
  130. if (!sch->lock)
  131. return -ENOMEM;
  132. spin_lock_init(sch->lock);
  133. mutex_init(&sch->reg_mutex);
  134. return 0;
  135. }
  136. static void css_subchannel_release(struct device *dev)
  137. {
  138. struct subchannel *sch = to_subchannel(dev);
  139. sch->config.intparm = 0;
  140. cio_commit_config(sch);
  141. kfree(sch->driver_override);
  142. kfree(sch->lock);
  143. kfree(sch);
  144. }
  145. static int css_validate_subchannel(struct subchannel_id schid,
  146. struct schib *schib)
  147. {
  148. int err;
  149. switch (schib->pmcw.st) {
  150. case SUBCHANNEL_TYPE_IO:
  151. case SUBCHANNEL_TYPE_MSG:
  152. if (!css_sch_is_valid(schib))
  153. err = -ENODEV;
  154. else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
  155. CIO_MSG_EVENT(6, "Blacklisted device detected "
  156. "at devno %04X, subchannel set %x\n",
  157. schib->pmcw.dev, schid.ssid);
  158. err = -ENODEV;
  159. } else
  160. err = 0;
  161. break;
  162. default:
  163. err = 0;
  164. }
  165. if (err)
  166. goto out;
  167. CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
  168. schid.ssid, schid.sch_no, schib->pmcw.st);
  169. out:
  170. return err;
  171. }
  172. struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
  173. struct schib *schib)
  174. {
  175. struct subchannel *sch;
  176. int ret;
  177. ret = css_validate_subchannel(schid, schib);
  178. if (ret < 0)
  179. return ERR_PTR(ret);
  180. sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
  181. if (!sch)
  182. return ERR_PTR(-ENOMEM);
  183. sch->schid = schid;
  184. sch->schib = *schib;
  185. sch->st = schib->pmcw.st;
  186. ret = css_sch_create_locks(sch);
  187. if (ret)
  188. goto err;
  189. INIT_WORK(&sch->todo_work, css_sch_todo);
  190. sch->dev.release = &css_subchannel_release;
  191. sch->dev.dma_mask = &sch->dma_mask;
  192. device_initialize(&sch->dev);
  193. /*
  194. * The physical addresses for some of the dma structures that can
  195. * belong to a subchannel need to fit 31 bit width (e.g. ccw).
  196. */
  197. ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
  198. if (ret)
  199. goto err_lock;
  200. /*
  201. * But we don't have such restrictions imposed on the stuff that
  202. * is handled by the streaming API.
  203. */
  204. ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
  205. if (ret)
  206. goto err_lock;
  207. return sch;
  208. err_lock:
  209. kfree(sch->lock);
  210. err:
  211. kfree(sch);
  212. return ERR_PTR(ret);
  213. }
  214. static int css_sch_device_register(struct subchannel *sch)
  215. {
  216. int ret;
  217. mutex_lock(&sch->reg_mutex);
  218. dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
  219. sch->schid.sch_no);
  220. ret = device_add(&sch->dev);
  221. mutex_unlock(&sch->reg_mutex);
  222. return ret;
  223. }
  224. /**
  225. * css_sch_device_unregister - unregister a subchannel
  226. * @sch: subchannel to be unregistered
  227. */
  228. void css_sch_device_unregister(struct subchannel *sch)
  229. {
  230. mutex_lock(&sch->reg_mutex);
  231. if (device_is_registered(&sch->dev))
  232. device_unregister(&sch->dev);
  233. mutex_unlock(&sch->reg_mutex);
  234. }
  235. EXPORT_SYMBOL_GPL(css_sch_device_unregister);
  236. static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
  237. {
  238. int i;
  239. int mask;
  240. memset(ssd, 0, sizeof(struct chsc_ssd_info));
  241. ssd->path_mask = pmcw->pim;
  242. for (i = 0; i < 8; i++) {
  243. mask = 0x80 >> i;
  244. if (pmcw->pim & mask) {
  245. chp_id_init(&ssd->chpid[i]);
  246. ssd->chpid[i].id = pmcw->chpid[i];
  247. }
  248. }
  249. }
  250. static void ssd_register_chpids(struct chsc_ssd_info *ssd)
  251. {
  252. int i;
  253. int mask;
  254. for (i = 0; i < 8; i++) {
  255. mask = 0x80 >> i;
  256. if (ssd->path_mask & mask)
  257. chp_new(ssd->chpid[i]);
  258. }
  259. }
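/*
 * Refresh the subchannel's ssd_info: ask CHSC for the subchannel
 * description and, if that fails, derive a minimal version from the
 * PMCW path installed mask.  Afterwards make sure all channel paths
 * referenced by the ssd_info are known to the channel-path layer.
 */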
  260. void css_update_ssd_info(struct subchannel *sch)
  261. {
  262. int ret;
  263. ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
  264. if (ret)
  265. ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
  266. ssd_register_chpids(&sch->ssd_info);
  267. }
  268. static ssize_t type_show(struct device *dev, struct device_attribute *attr,
  269. char *buf)
  270. {
  271. struct subchannel *sch = to_subchannel(dev);
  272. return sprintf(buf, "%01x\n", sch->st);
  273. }
  274. static DEVICE_ATTR_RO(type);
  275. static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
  276. char *buf)
  277. {
  278. struct subchannel *sch = to_subchannel(dev);
  279. return sprintf(buf, "css:t%01X\n", sch->st);
  280. }
  281. static DEVICE_ATTR_RO(modalias);
  282. static ssize_t driver_override_store(struct device *dev,
  283. struct device_attribute *attr,
  284. const char *buf, size_t count)
  285. {
  286. struct subchannel *sch = to_subchannel(dev);
  287. int ret;
  288. ret = driver_set_override(dev, &sch->driver_override, buf, count);
  289. if (ret)
  290. return ret;
  291. return count;
  292. }
  293. static ssize_t driver_override_show(struct device *dev,
  294. struct device_attribute *attr, char *buf)
  295. {
  296. struct subchannel *sch = to_subchannel(dev);
  297. ssize_t len;
  298. device_lock(dev);
  299. len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
  300. device_unlock(dev);
  301. return len;
  302. }
  303. static DEVICE_ATTR_RW(driver_override);
  304. static struct attribute *subch_attrs[] = {
  305. &dev_attr_type.attr,
  306. &dev_attr_modalias.attr,
  307. &dev_attr_driver_override.attr,
  308. NULL,
  309. };
  310. static struct attribute_group subch_attr_group = {
  311. .attrs = subch_attrs,
  312. };
  313. static const struct attribute_group *default_subch_attr_groups[] = {
  314. &subch_attr_group,
  315. NULL,
  316. };
  317. static ssize_t chpids_show(struct device *dev,
  318. struct device_attribute *attr,
  319. char *buf)
  320. {
  321. struct subchannel *sch = to_subchannel(dev);
  322. struct chsc_ssd_info *ssd = &sch->ssd_info;
  323. ssize_t ret = 0;
  324. int mask;
  325. int chp;
  326. for (chp = 0; chp < 8; chp++) {
  327. mask = 0x80 >> chp;
  328. if (ssd->path_mask & mask)
  329. ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
  330. else
  331. ret += sprintf(buf + ret, "00 ");
  332. }
  333. ret += sprintf(buf + ret, "\n");
  334. return ret;
  335. }
  336. static DEVICE_ATTR_RO(chpids);
  337. static ssize_t pimpampom_show(struct device *dev,
  338. struct device_attribute *attr,
  339. char *buf)
  340. {
  341. struct subchannel *sch = to_subchannel(dev);
  342. struct pmcw *pmcw = &sch->schib.pmcw;
  343. return sprintf(buf, "%02x %02x %02x\n",
  344. pmcw->pim, pmcw->pam, pmcw->pom);
  345. }
  346. static DEVICE_ATTR_RO(pimpampom);
  347. static ssize_t dev_busid_show(struct device *dev,
  348. struct device_attribute *attr,
  349. char *buf)
  350. {
  351. struct subchannel *sch = to_subchannel(dev);
  352. struct pmcw *pmcw = &sch->schib.pmcw;
  353. if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
  354. (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
  355. return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
  356. pmcw->dev);
  357. else
  358. return sysfs_emit(buf, "none\n");
  359. }
  360. static DEVICE_ATTR_RO(dev_busid);
  361. static struct attribute *io_subchannel_type_attrs[] = {
  362. &dev_attr_chpids.attr,
  363. &dev_attr_pimpampom.attr,
  364. &dev_attr_dev_busid.attr,
  365. NULL,
  366. };
  367. ATTRIBUTE_GROUPS(io_subchannel_type);
  368. static const struct device_type io_subchannel_type = {
  369. .groups = io_subchannel_type_groups,
  370. };
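/*
 * Finish initialization of a freshly allocated subchannel (parent css
 * device, bus, attribute groups, device type for I/O subchannels, ssd
 * info) and register it with the driver core.
 */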
  371. int css_register_subchannel(struct subchannel *sch)
  372. {
  373. int ret;
  374. /* Initialize the subchannel structure */
  375. sch->dev.parent = &channel_subsystems[0]->device;
  376. sch->dev.bus = &css_bus_type;
  377. sch->dev.groups = default_subch_attr_groups;
  378. if (sch->st == SUBCHANNEL_TYPE_IO)
  379. sch->dev.type = &io_subchannel_type;
  380. css_update_ssd_info(sch);
  381. /* make it known to the system */
  382. ret = css_sch_device_register(sch);
  383. if (ret) {
  384. CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
  385. sch->schid.ssid, sch->schid.sch_no, ret);
  386. return ret;
  387. }
  388. return ret;
  389. }
  390. static int css_probe_device(struct subchannel_id schid, struct schib *schib)
  391. {
  392. struct subchannel *sch;
  393. int ret;
  394. sch = css_alloc_subchannel(schid, schib);
  395. if (IS_ERR(sch))
  396. return PTR_ERR(sch);
  397. ret = css_register_subchannel(sch);
  398. if (ret)
  399. put_device(&sch->dev);
  400. return ret;
  401. }
  402. static int
  403. check_subchannel(struct device *dev, const void *data)
  404. {
  405. struct subchannel *sch;
  406. struct subchannel_id *schid = (void *)data;
  407. sch = to_subchannel(dev);
  408. return schid_equal(&sch->schid, schid);
  409. }
  410. struct subchannel *
  411. get_subchannel_by_schid(struct subchannel_id schid)
  412. {
  413. struct device *dev;
  414. dev = bus_find_device(&css_bus_type, NULL,
  415. &schid, check_subchannel);
  416. return dev ? to_subchannel(dev) : NULL;
  417. }
  418. /**
  419. * css_sch_is_valid() - check if a subchannel is valid
  420. * @schib: subchannel information block for the subchannel
  421. */
  422. int css_sch_is_valid(struct schib *schib)
  423. {
  424. if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
  425. return 0;
  426. if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
  427. return 0;
  428. return 1;
  429. }
  430. EXPORT_SYMBOL_GPL(css_sch_is_valid);
  431. static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
  432. {
  433. struct schib schib;
  434. int ccode;
  435. if (!slow) {
  436. /* Will be done on the slow path. */
  437. return -EAGAIN;
  438. }
  439. /*
  440. * The first subchannel that is not-operational (ccode==3)
  441. * indicates that there aren't any more devices available.
  442. * If stsch gets an exception, it means the current subchannel set
  443. * is not valid.
  444. */
  445. ccode = stsch(schid, &schib);
  446. if (ccode)
  447. return (ccode == 3) ? -ENXIO : ccode;
  448. return css_probe_device(schid, &schib);
  449. }
  450. static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
  451. {
  452. int ret = 0;
  453. if (sch->driver) {
  454. if (sch->driver->sch_event)
  455. ret = sch->driver->sch_event(sch, slow);
  456. else
  457. dev_dbg(&sch->dev,
  458. "Got subchannel machine check but "
  459. "no sch_event handler provided.\n");
  460. }
  461. if (ret != 0 && ret != -EAGAIN) {
  462. CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
  463. sch->schid.ssid, sch->schid.sch_no, ret);
  464. }
  465. return ret;
  466. }
  467. static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
  468. {
  469. struct subchannel *sch;
  470. int ret;
  471. sch = get_subchannel_by_schid(schid);
  472. if (sch) {
  473. ret = css_evaluate_known_subchannel(sch, slow);
  474. put_device(&sch->dev);
  475. } else
  476. ret = css_evaluate_new_subchannel(schid, slow);
  477. if (ret == -EAGAIN)
  478. css_schedule_eval(schid);
  479. }
  480. /**
  481. * css_sched_sch_todo - schedule a subchannel operation
  482. * @sch: subchannel
  483. * @todo: todo
  484. *
  485. * Schedule the operation identified by @todo to be performed on the slow path
  486. * workqueue. Do nothing if another operation with higher priority is already
  487. * scheduled. Needs to be called with subchannel lock held.
  488. */
  489. void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
  490. {
  491. CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
  492. sch->schid.ssid, sch->schid.sch_no, todo);
  493. if (sch->todo >= todo)
  494. return;
  495. /* Get workqueue ref. */
  496. if (!get_device(&sch->dev))
  497. return;
  498. sch->todo = todo;
  499. if (!queue_work(cio_work_q, &sch->todo_work)) {
  500. /* Already queued, release workqueue ref. */
  501. put_device(&sch->dev);
  502. }
  503. }
  504. EXPORT_SYMBOL_GPL(css_sched_sch_todo);
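/*
 * Workqueue handler for the per-subchannel todo work: read and clear the
 * pending todo under the subchannel lock, then either re-evaluate the
 * subchannel (re-scheduling itself on -EAGAIN) or unregister it.  The
 * device reference taken in css_sched_sch_todo() is dropped at the end.
 */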
  505. static void css_sch_todo(struct work_struct *work)
  506. {
  507. struct subchannel *sch;
  508. enum sch_todo todo;
  509. int ret;
  510. sch = container_of(work, struct subchannel, todo_work);
  511. /* Find out todo. */
  512. spin_lock_irq(sch->lock);
  513. todo = sch->todo;
  514. CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
  515. sch->schid.sch_no, todo);
  516. sch->todo = SCH_TODO_NOTHING;
  517. spin_unlock_irq(sch->lock);
  518. /* Perform todo. */
  519. switch (todo) {
  520. case SCH_TODO_NOTHING:
  521. break;
  522. case SCH_TODO_EVAL:
  523. ret = css_evaluate_known_subchannel(sch, 1);
  524. if (ret == -EAGAIN) {
  525. spin_lock_irq(sch->lock);
  526. css_sched_sch_todo(sch, todo);
  527. spin_unlock_irq(sch->lock);
  528. }
  529. break;
  530. case SCH_TODO_UNREG:
  531. css_sch_device_unregister(sch);
  532. break;
  533. }
  534. /* Release workqueue ref. */
  535. put_device(&sch->dev);
  536. }
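/*
 * Slow-path evaluation: subchannel ids that need (re-)evaluation are
 * collected in slow_subchannel_set and processed from the cio_work_q
 * workqueue.  css_eval_wq is woken up once the set runs empty so that
 * css_complete_work() can wait for evaluation to finish.
 */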
  537. static struct idset *slow_subchannel_set;
  538. static DEFINE_SPINLOCK(slow_subchannel_lock);
  539. static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
  540. static atomic_t css_eval_scheduled;
  541. static int __init slow_subchannel_init(void)
  542. {
  543. atomic_set(&css_eval_scheduled, 0);
  544. slow_subchannel_set = idset_sch_new();
  545. if (!slow_subchannel_set) {
  546. CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
  547. return -ENOMEM;
  548. }
  549. return 0;
  550. }
  551. static int slow_eval_known_fn(struct subchannel *sch, void *data)
  552. {
  553. int eval;
  554. int rc;
  555. spin_lock_irq(&slow_subchannel_lock);
  556. eval = idset_sch_contains(slow_subchannel_set, sch->schid);
  557. idset_sch_del(slow_subchannel_set, sch->schid);
  558. spin_unlock_irq(&slow_subchannel_lock);
  559. if (eval) {
  560. rc = css_evaluate_known_subchannel(sch, 1);
  561. if (rc == -EAGAIN)
  562. css_schedule_eval(sch->schid);
  563. /*
  564. * The loop might take long time for platforms with lots of
  565. * known devices. Allow scheduling here.
  566. */
  567. cond_resched();
  568. }
  569. return 0;
  570. }
  571. static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
  572. {
  573. int eval;
  574. int rc = 0;
  575. spin_lock_irq(&slow_subchannel_lock);
  576. eval = idset_sch_contains(slow_subchannel_set, schid);
  577. idset_sch_del(slow_subchannel_set, schid);
  578. spin_unlock_irq(&slow_subchannel_lock);
  579. if (eval) {
  580. rc = css_evaluate_new_subchannel(schid, 1);
  581. switch (rc) {
  582. case -EAGAIN:
  583. css_schedule_eval(schid);
  584. rc = 0;
  585. break;
  586. case -ENXIO:
  587. case -ENOMEM:
  588. case -EIO:
  589. /* These should abort looping */
  590. spin_lock_irq(&slow_subchannel_lock);
  591. idset_sch_del_subseq(slow_subchannel_set, schid);
  592. spin_unlock_irq(&slow_subchannel_lock);
  593. break;
  594. default:
  595. rc = 0;
  596. }
  597. /* Allow scheduling here since the containing loop might
  598. * take a while. */
  599. cond_resched();
  600. }
  601. return rc;
  602. }
  603. static void css_slow_path_func(struct work_struct *unused)
  604. {
  605. unsigned long flags;
  606. CIO_TRACE_EVENT(4, "slowpath");
  607. for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
  608. NULL);
  609. spin_lock_irqsave(&slow_subchannel_lock, flags);
  610. if (idset_is_empty(slow_subchannel_set)) {
  611. atomic_set(&css_eval_scheduled, 0);
  612. wake_up(&css_eval_wq);
  613. }
  614. spin_unlock_irqrestore(&slow_subchannel_lock, flags);
  615. }
  616. static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
  617. struct workqueue_struct *cio_work_q;
  618. void css_schedule_eval(struct subchannel_id schid)
  619. {
  620. unsigned long flags;
  621. spin_lock_irqsave(&slow_subchannel_lock, flags);
  622. idset_sch_add(slow_subchannel_set, schid);
  623. atomic_set(&css_eval_scheduled, 1);
  624. queue_delayed_work(cio_work_q, &slow_path_work, 0);
  625. spin_unlock_irqrestore(&slow_subchannel_lock, flags);
  626. }
  627. void css_schedule_eval_all(void)
  628. {
  629. unsigned long flags;
  630. spin_lock_irqsave(&slow_subchannel_lock, flags);
  631. idset_fill(slow_subchannel_set);
  632. atomic_set(&css_eval_scheduled, 1);
  633. queue_delayed_work(cio_work_q, &slow_path_work, 0);
  634. spin_unlock_irqrestore(&slow_subchannel_lock, flags);
  635. }
  636. static int __unset_registered(struct device *dev, void *data)
  637. {
  638. struct idset *set = data;
  639. struct subchannel *sch = to_subchannel(dev);
  640. idset_sch_del(set, sch->schid);
  641. return 0;
  642. }
  643. static int __unset_online(struct device *dev, void *data)
  644. {
  645. struct idset *set = data;
  646. struct subchannel *sch = to_subchannel(dev);
  647. if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
  648. idset_sch_del(set, sch->schid);
  649. return 0;
  650. }
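/*
 * Schedule evaluation for a subset of subchannels: start from a full
 * idset and clear the entries that do not match @cond (registered
 * subchannels for CSS_EVAL_UNREG, enabled I/O subchannels for
 * CSS_EVAL_NOT_ONLINE), then merge the result into slow_subchannel_set
 * and kick the slow-path work after @delay.
 */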
  651. void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
  652. {
  653. unsigned long flags;
  654. struct idset *set;
  655. /* Find unregistered subchannels. */
  656. set = idset_sch_new();
  657. if (!set) {
  658. /* Fallback. */
  659. css_schedule_eval_all();
  660. return;
  661. }
  662. idset_fill(set);
  663. switch (cond) {
  664. case CSS_EVAL_UNREG:
  665. bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
  666. break;
  667. case CSS_EVAL_NOT_ONLINE:
  668. bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
  669. break;
  670. default:
  671. break;
  672. }
  673. /* Apply to slow_subchannel_set. */
  674. spin_lock_irqsave(&slow_subchannel_lock, flags);
  675. idset_add_set(slow_subchannel_set, set);
  676. atomic_set(&css_eval_scheduled, 1);
  677. queue_delayed_work(cio_work_q, &slow_path_work, delay);
  678. spin_unlock_irqrestore(&slow_subchannel_lock, flags);
  679. idset_free(set);
  680. }
  681. void css_wait_for_slow_path(void)
  682. {
  683. flush_workqueue(cio_work_q);
  684. }
  685. /* Schedule reprobing of all unregistered subchannels. */
  686. void css_schedule_reprobe(void)
  687. {
  688. /* Schedule with a delay to allow merging of subsequent calls. */
  689. css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
  690. }
  691. EXPORT_SYMBOL_GPL(css_schedule_reprobe);
  692. /*
  693. * Called from the machine check handler for subchannel report words.
  694. */
  695. static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
  696. {
  697. struct subchannel_id mchk_schid;
  698. struct subchannel *sch;
  699. if (overflow) {
  700. css_schedule_eval_all();
  701. return;
  702. }
  703. CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
  704. "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
  705. crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
  706. crw0->erc, crw0->rsid);
  707. if (crw1)
  708. CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
  709. "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
  710. crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
  711. crw1->anc, crw1->erc, crw1->rsid);
  712. init_subchannel_id(&mchk_schid);
  713. mchk_schid.sch_no = crw0->rsid;
  714. if (crw1)
  715. mchk_schid.ssid = (crw1->rsid >> 4) & 3;
  716. if (crw0->erc == CRW_ERC_PMOD) {
  717. sch = get_subchannel_by_schid(mchk_schid);
  718. if (sch) {
  719. css_update_ssd_info(sch);
  720. put_device(&sch->dev);
  721. }
  722. }
  723. /*
  724. * Since we are always presented with IPI in the CRW, we have to
  725. * use stsch() to find out if the subchannel in question has come
  726. * or gone.
  727. */
  728. css_evaluate_subchannel(mchk_schid, 0);
  729. }
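/*
 * Build the global path-group id used for all devices in this channel
 * subsystem: the high word carries either the extended cssid (when
 * multiple channel subsystems are supported) or the CPU address,
 * followed by CPU id, CPU model and the high word of the TOD clock
 * captured during css setup.
 */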
  730. static void __init
  731. css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
  732. {
  733. struct cpuid cpu_id;
  734. if (css_general_characteristics.mcss) {
  735. css->global_pgid.pgid_high.ext_cssid.version = 0x80;
  736. css->global_pgid.pgid_high.ext_cssid.cssid =
  737. css->id_valid ? css->cssid : 0;
  738. } else {
  739. css->global_pgid.pgid_high.cpu_addr = stap();
  740. }
  741. get_cpu_id(&cpu_id);
  742. css->global_pgid.cpu_id = cpu_id.ident;
  743. css->global_pgid.cpu_model = cpu_id.machine;
  744. css->global_pgid.tod_high = tod_high;
  745. }
  746. static void channel_subsystem_release(struct device *dev)
  747. {
  748. struct channel_subsystem *css = to_css(dev);
  749. mutex_destroy(&css->mutex);
  750. kfree(css);
  751. }
  752. static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
  753. char *buf)
  754. {
  755. struct channel_subsystem *css = to_css(dev);
  756. if (!css->id_valid)
  757. return -EINVAL;
  758. return sprintf(buf, "%x\n", css->cssid);
  759. }
  760. static DEVICE_ATTR_RO(real_cssid);
  761. static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
  762. const char *buf, size_t count)
  763. {
  764. CIO_TRACE_EVENT(4, "usr-rescan");
  765. css_schedule_eval_all();
  766. css_complete_work();
  767. return count;
  768. }
  769. static DEVICE_ATTR_WO(rescan);
  770. static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
  771. char *buf)
  772. {
  773. struct channel_subsystem *css = to_css(dev);
  774. int ret;
  775. mutex_lock(&css->mutex);
  776. ret = sprintf(buf, "%x\n", css->cm_enabled);
  777. mutex_unlock(&css->mutex);
  778. return ret;
  779. }
  780. static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
  781. const char *buf, size_t count)
  782. {
  783. struct channel_subsystem *css = to_css(dev);
  784. unsigned long val;
  785. int ret;
  786. ret = kstrtoul(buf, 16, &val);
  787. if (ret)
  788. return ret;
  789. mutex_lock(&css->mutex);
  790. switch (val) {
  791. case 0:
  792. ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
  793. break;
  794. case 1:
  795. ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
  796. break;
  797. default:
  798. ret = -EINVAL;
  799. }
  800. mutex_unlock(&css->mutex);
  801. return ret < 0 ? ret : count;
  802. }
  803. static DEVICE_ATTR_RW(cm_enable);
  804. static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
  805. int index)
  806. {
  807. return css_chsc_characteristics.secm ? attr->mode : 0;
  808. }
  809. static struct attribute *cssdev_attrs[] = {
  810. &dev_attr_real_cssid.attr,
  811. &dev_attr_rescan.attr,
  812. NULL,
  813. };
  814. static struct attribute_group cssdev_attr_group = {
  815. .attrs = cssdev_attrs,
  816. };
  817. static struct attribute *cssdev_cm_attrs[] = {
  818. &dev_attr_cm_enable.attr,
  819. NULL,
  820. };
  821. static struct attribute_group cssdev_cm_attr_group = {
  822. .attrs = cssdev_cm_attrs,
  823. .is_visible = cm_enable_mode,
  824. };
  825. static const struct attribute_group *cssdev_attr_groups[] = {
  826. &cssdev_attr_group,
  827. &cssdev_cm_attr_group,
  828. NULL,
  829. };
  830. static int __init setup_css(int nr)
  831. {
  832. struct channel_subsystem *css;
  833. int ret;
  834. css = kzalloc(sizeof(*css), GFP_KERNEL);
  835. if (!css)
  836. return -ENOMEM;
  837. channel_subsystems[nr] = css;
  838. dev_set_name(&css->device, "css%x", nr);
  839. css->device.groups = cssdev_attr_groups;
  840. css->device.release = channel_subsystem_release;
  841. /*
  842. * We currently allocate notifier bits with this (using
  843. * css->device as the device argument with the DMA API)
  844. * and are fine with 64 bit addresses.
  845. */
  846. ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
  847. if (ret) {
  848. kfree(css);
  849. goto out_err;
  850. }
  851. mutex_init(&css->mutex);
  852. ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
  853. if (!ret) {
  854. css->id_valid = true;
  855. pr_info("Partition identifier %01x.%01x\n", css->cssid,
  856. css->iid);
  857. }
  858. css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
  859. ret = device_register(&css->device);
  860. if (ret) {
  861. put_device(&css->device);
  862. goto out_err;
  863. }
  864. css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
  865. GFP_KERNEL);
  866. if (!css->pseudo_subchannel) {
  867. device_unregister(&css->device);
  868. ret = -ENOMEM;
  869. goto out_err;
  870. }
  871. css->pseudo_subchannel->dev.parent = &css->device;
  872. css->pseudo_subchannel->dev.release = css_subchannel_release;
  873. mutex_init(&css->pseudo_subchannel->reg_mutex);
  874. ret = css_sch_create_locks(css->pseudo_subchannel);
  875. if (ret) {
  876. kfree(css->pseudo_subchannel);
  877. device_unregister(&css->device);
  878. goto out_err;
  879. }
  880. dev_set_name(&css->pseudo_subchannel->dev, "defunct");
  881. ret = device_register(&css->pseudo_subchannel->dev);
  882. if (ret) {
  883. put_device(&css->pseudo_subchannel->dev);
  884. device_unregister(&css->device);
  885. goto out_err;
  886. }
  887. return ret;
  888. out_err:
  889. channel_subsystems[nr] = NULL;
  890. return ret;
  891. }
  892. static int css_reboot_event(struct notifier_block *this,
  893. unsigned long event,
  894. void *ptr)
  895. {
  896. struct channel_subsystem *css;
  897. int ret;
  898. ret = NOTIFY_DONE;
  899. for_each_css(css) {
  900. mutex_lock(&css->mutex);
  901. if (css->cm_enabled)
  902. if (chsc_secm(css, 0))
  903. ret = NOTIFY_BAD;
  904. mutex_unlock(&css->mutex);
  905. }
  906. return ret;
  907. }
  908. static struct notifier_block css_reboot_notifier = {
  909. .notifier_call = css_reboot_event,
  910. };
  911. #define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
  912. static struct gen_pool *cio_dma_pool;
  913. /* Currently cio supports only a single css */
  914. struct device *cio_get_dma_css_dev(void)
  915. {
  916. return &channel_subsystems[0]->device;
  917. }
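/*
 * Create a gen_pool backed by coherent DMA pages for @dma_dev.  The
 * minimum allocation order is 3, i.e. allocations are rounded up to a
 * multiple of 8 bytes.  If not all requested pages can be allocated,
 * the pool is returned with whatever could be obtained so far.
 */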
  918. struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
  919. {
  920. struct gen_pool *gp_dma;
  921. void *cpu_addr;
  922. dma_addr_t dma_addr;
  923. int i;
  924. gp_dma = gen_pool_create(3, -1);
  925. if (!gp_dma)
  926. return NULL;
  927. for (i = 0; i < nr_pages; ++i) {
  928. cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
  929. CIO_DMA_GFP);
  930. if (!cpu_addr)
  931. return gp_dma;
  932. gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
  933. dma_addr, PAGE_SIZE, -1);
  934. }
  935. return gp_dma;
  936. }
  937. static void __gp_dma_free_dma(struct gen_pool *pool,
  938. struct gen_pool_chunk *chunk, void *data)
  939. {
  940. size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
  941. dma_free_coherent((struct device *) data, chunk_size,
  942. (void *) chunk->start_addr,
  943. (dma_addr_t) chunk->phys_addr);
  944. }
  945. void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
  946. {
  947. if (!gp_dma)
  948. return;
  949. /* this is quite ugly but no better idea */
  950. gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
  951. gen_pool_destroy(gp_dma);
  952. }
  953. static int cio_dma_pool_init(void)
  954. {
  955. /* No need to free up the resources: compiled in */
  956. cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
  957. if (!cio_dma_pool)
  958. return -ENOMEM;
  959. return 0;
  960. }
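/*
 * Allocate zeroed memory from @gp_dma.  If the pool is exhausted, grow
 * it by another coherent chunk (the request rounded up to page size)
 * and retry.  Memory obtained here is returned with cio_gp_dma_free(),
 * which clears it before handing it back to the pool.
 */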
  961. void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
  962. size_t size)
  963. {
  964. dma_addr_t dma_addr;
  965. unsigned long addr;
  966. size_t chunk_size;
  967. if (!gp_dma)
  968. return NULL;
  969. addr = gen_pool_alloc(gp_dma, size);
  970. while (!addr) {
  971. chunk_size = round_up(size, PAGE_SIZE);
  972. addr = (unsigned long) dma_alloc_coherent(dma_dev,
  973. chunk_size, &dma_addr, CIO_DMA_GFP);
  974. if (!addr)
  975. return NULL;
  976. gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
  977. addr = gen_pool_alloc(gp_dma, size);
  978. }
  979. return (void *) addr;
  980. }
  981. void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
  982. {
  983. if (!cpu_addr)
  984. return;
  985. memset(cpu_addr, 0, size);
  986. gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
  987. }
  988. /*
  989. * Allocate dma memory from the css global pool. Intended for memory not
  990. * specific to any single device within the css. The allocated memory
  991. * is not guaranteed to be 31-bit addressable.
  992. *
  993. * Caution: Not suitable for early stuff like console.
  994. */
  995. void *cio_dma_zalloc(size_t size)
  996. {
  997. return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
  998. }
  999. void cio_dma_free(void *cpu_addr, size_t size)
  1000. {
  1001. cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
  1002. }
  1003. /*
  1004. * Now that the driver core is running, we can setup our channel subsystem.
  1005. * The struct subchannel's are created during probing.
  1006. */
  1007. static int __init css_bus_init(void)
  1008. {
  1009. int ret, i;
  1010. ret = chsc_init();
  1011. if (ret)
  1012. return ret;
  1013. chsc_determine_css_characteristics();
  1014. /* Try to enable MSS. */
  1015. ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
  1016. if (ret)
  1017. max_ssid = 0;
  1018. else /* Success. */
  1019. max_ssid = __MAX_SSID;
  1020. ret = slow_subchannel_init();
  1021. if (ret)
  1022. goto out;
  1023. ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
  1024. if (ret)
  1025. goto out;
  1026. if ((ret = bus_register(&css_bus_type)))
  1027. goto out;
  1028. /* Setup css structure. */
  1029. for (i = 0; i <= MAX_CSS_IDX; i++) {
  1030. ret = setup_css(i);
  1031. if (ret)
  1032. goto out_unregister;
  1033. }
  1034. ret = register_reboot_notifier(&css_reboot_notifier);
  1035. if (ret)
  1036. goto out_unregister;
  1037. ret = cio_dma_pool_init();
  1038. if (ret)
  1039. goto out_unregister_rn;
  1040. airq_init();
  1041. css_init_done = 1;
  1042. /* Enable default isc for I/O subchannels. */
  1043. isc_register(IO_SCH_ISC);
  1044. return 0;
  1045. out_unregister_rn:
  1046. unregister_reboot_notifier(&css_reboot_notifier);
  1047. out_unregister:
  1048. while (i-- > 0) {
  1049. struct channel_subsystem *css = channel_subsystems[i];
  1050. device_unregister(&css->pseudo_subchannel->dev);
  1051. device_unregister(&css->device);
  1052. }
  1053. bus_unregister(&css_bus_type);
  1054. out:
  1055. crw_unregister_handler(CRW_RSC_SCH);
  1056. idset_free(slow_subchannel_set);
  1057. chsc_init_cleanup();
  1058. pr_alert("The CSS device driver initialization failed with "
  1059. "errno=%d\n", ret);
  1060. return ret;
  1061. }
  1062. static void __init css_bus_cleanup(void)
  1063. {
  1064. struct channel_subsystem *css;
  1065. for_each_css(css) {
  1066. device_unregister(&css->pseudo_subchannel->dev);
  1067. device_unregister(&css->device);
  1068. }
  1069. bus_unregister(&css_bus_type);
  1070. crw_unregister_handler(CRW_RSC_SCH);
  1071. idset_free(slow_subchannel_set);
  1072. chsc_init_cleanup();
  1073. isc_unregister(IO_SCH_ISC);
  1074. }
  1075. static int __init channel_subsystem_init(void)
  1076. {
  1077. int ret;
  1078. ret = css_bus_init();
  1079. if (ret)
  1080. return ret;
  1081. cio_work_q = create_singlethread_workqueue("cio");
  1082. if (!cio_work_q) {
  1083. ret = -ENOMEM;
  1084. goto out_bus;
  1085. }
  1086. ret = io_subchannel_init();
  1087. if (ret)
  1088. goto out_wq;
  1089. /* Register subchannels which are already in use. */
  1090. cio_register_early_subchannels();
  1091. /* Start initial subchannel evaluation. */
  1092. css_schedule_eval_all();
  1093. return ret;
  1094. out_wq:
  1095. destroy_workqueue(cio_work_q);
  1096. out_bus:
  1097. css_bus_cleanup();
  1098. return ret;
  1099. }
  1100. subsys_initcall(channel_subsystem_init);
  1101. static int css_settle(struct device_driver *drv, void *unused)
  1102. {
  1103. struct css_driver *cssdrv = to_cssdriver(drv);
  1104. if (cssdrv->settle)
  1105. return cssdrv->settle();
  1106. return 0;
  1107. }
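/*
 * Wait (interruptibly) until all scheduled subchannel evaluation has
 * finished, flush the cio workqueue, and then give every registered css
 * driver a chance to settle via its ->settle callback.
 */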
  1108. int css_complete_work(void)
  1109. {
  1110. int ret;
  1111. /* Wait for the evaluation of subchannels to finish. */
  1112. ret = wait_event_interruptible(css_eval_wq,
  1113. atomic_read(&css_eval_scheduled) == 0);
  1114. if (ret)
  1115. return -EINTR;
  1116. flush_workqueue(cio_work_q);
  1117. /* Wait for the subchannel type specific initialization to finish */
  1118. return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
  1119. }
  1120. /*
  1121. * Wait for the initialization of devices to finish, to make sure we are
  1122. * done with our setup if the search for the root device starts.
  1123. */
  1124. static int __init channel_subsystem_init_sync(void)
  1125. {
  1126. css_complete_work();
  1127. return 0;
  1128. }
  1129. subsys_initcall_sync(channel_subsystem_init_sync);
  1130. #ifdef CONFIG_PROC_FS
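/*
 * Writing anything to /proc/cio_settle (for example "echo > /proc/cio_settle")
 * blocks the writer until all pending channel report words have been
 * processed and the subchannel evaluation triggered by them has completed.
 */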
  1131. static ssize_t cio_settle_write(struct file *file, const char __user *buf,
  1132. size_t count, loff_t *ppos)
  1133. {
  1134. int ret;
  1135. /* Handle pending CRW's. */
  1136. crw_wait_for_channel_report();
  1137. ret = css_complete_work();
  1138. return ret ? ret : count;
  1139. }
  1140. static const struct proc_ops cio_settle_proc_ops = {
  1141. .proc_open = nonseekable_open,
  1142. .proc_write = cio_settle_write,
  1143. .proc_lseek = no_llseek,
  1144. };
  1145. static int __init cio_settle_init(void)
  1146. {
  1147. struct proc_dir_entry *entry;
  1148. entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
  1149. if (!entry)
  1150. return -ENOMEM;
  1151. return 0;
  1152. }
  1153. device_initcall(cio_settle_init);
  1154. #endif /*CONFIG_PROC_FS*/
  1155. int sch_is_pseudo_sch(struct subchannel *sch)
  1156. {
  1157. if (!sch->dev.parent)
  1158. return 0;
  1159. return sch == to_css(sch->dev.parent)->pseudo_subchannel;
  1160. }
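/*
 * A css driver matches a subchannel if the subchannel type is listed in
 * the driver's subchannel_type id table.  When the driver_override
 * attribute is set, only the driver with that exact name may bind.
 */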
  1161. static int css_bus_match(struct device *dev, struct device_driver *drv)
  1162. {
  1163. struct subchannel *sch = to_subchannel(dev);
  1164. struct css_driver *driver = to_cssdriver(drv);
  1165. struct css_device_id *id;
  1166. /* When driver_override is set, only bind to the matching driver */
  1167. if (sch->driver_override && strcmp(sch->driver_override, drv->name))
  1168. return 0;
  1169. for (id = driver->subchannel_type; id->match_flags; id++) {
  1170. if (sch->st == id->type)
  1171. return 1;
  1172. }
  1173. return 0;
  1174. }
  1175. static int css_probe(struct device *dev)
  1176. {
  1177. struct subchannel *sch;
  1178. int ret;
  1179. sch = to_subchannel(dev);
  1180. sch->driver = to_cssdriver(dev->driver);
  1181. ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
  1182. if (ret)
  1183. sch->driver = NULL;
  1184. return ret;
  1185. }
  1186. static void css_remove(struct device *dev)
  1187. {
  1188. struct subchannel *sch;
  1189. sch = to_subchannel(dev);
  1190. if (sch->driver->remove)
  1191. sch->driver->remove(sch);
  1192. sch->driver = NULL;
  1193. }
  1194. static void css_shutdown(struct device *dev)
  1195. {
  1196. struct subchannel *sch;
  1197. sch = to_subchannel(dev);
  1198. if (sch->driver && sch->driver->shutdown)
  1199. sch->driver->shutdown(sch);
  1200. }
  1201. static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
  1202. {
  1203. struct subchannel *sch = to_subchannel(dev);
  1204. int ret;
  1205. ret = add_uevent_var(env, "ST=%01X", sch->st);
  1206. if (ret)
  1207. return ret;
  1208. ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
  1209. return ret;
  1210. }
  1211. static struct bus_type css_bus_type = {
  1212. .name = "css",
  1213. .match = css_bus_match,
  1214. .probe = css_probe,
  1215. .remove = css_remove,
  1216. .shutdown = css_shutdown,
  1217. .uevent = css_uevent,
  1218. };
  1219. /**
  1220. * css_driver_register - register a css driver
  1221. * @cdrv: css driver to register
  1222. *
  1223. * This is mainly a wrapper around driver_register that sets name
  1224. * and bus_type in the embedded struct device_driver correctly.
  1225. */
  1226. int css_driver_register(struct css_driver *cdrv)
  1227. {
  1228. cdrv->drv.bus = &css_bus_type;
  1229. return driver_register(&cdrv->drv);
  1230. }
  1231. EXPORT_SYMBOL_GPL(css_driver_register);
  1232. /**
  1233. * css_driver_unregister - unregister a css driver
  1234. * @cdrv: css driver to unregister
  1235. *
  1236. * This is a wrapper around driver_unregister.
  1237. */
  1238. void css_driver_unregister(struct css_driver *cdrv)
  1239. {
  1240. driver_unregister(&cdrv->drv);
  1241. }
  1242. EXPORT_SYMBOL_GPL(css_driver_unregister);