/* drivers/mtd/ubi/block.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2014 Ezequiel Garcia
  4. * Copyright (c) 2011 Free Electrons
  5. *
  6. * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
  7. * Copyright (c) International Business Machines Corp., 2006
  8. * Copyright (c) Nokia Corporation, 2007
  9. * Authors: Artem Bityutskiy, Frank Haverkamp
  10. */
  11. /*
  12. * Read-only block devices on top of UBI volumes
  13. *
  14. * A simple implementation to allow a block device to be layered on top of a
  15. * UBI volume. The implementation is provided by creating a static 1-to-1
  16. * mapping between the block device and the UBI volume.
  17. *
  18. * The addressed byte is obtained from the addressed block sector, which is
  19. * mapped linearly into the corresponding LEB:
  20. *
  21. * LEB number = addressed byte / LEB size
  22. *
  23. * This feature is compiled in the UBI core, and adds a 'block' parameter
  24. * to allow early creation of block devices on top of UBI volumes. Runtime
  25. * block creation/removal for UBI volumes is provided through two UBI ioctls:
  26. * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
  27. */
  28. #include <linux/module.h>
  29. #include <linux/init.h>
  30. #include <linux/err.h>
  31. #include <linux/kernel.h>
  32. #include <linux/list.h>
  33. #include <linux/mutex.h>
  34. #include <linux/slab.h>
  35. #include <linux/mtd/ubi.h>
  36. #include <linux/workqueue.h>
  37. #include <linux/blkdev.h>
  38. #include <linux/blk-mq.h>
  39. #include <linux/hdreg.h>
  40. #include <linux/scatterlist.h>
  41. #include <linux/idr.h>
  42. #include <asm/div64.h>
  43. #include "ubi-media.h"
  44. #include "ubi.h"
/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

/*
 * One parsed 'block=' command-line entry. Depending on what the user
 * gave us this is either a volume device path (name set, ubi_num and
 * vol_id both -1), a (ubi_num, volume-name) pair (vol_id == -1), or a
 * (ubi_num, vol_id) pair.
 */
struct ubiblock_param {
	int ubi_num;
	int vol_id;
	char name[UBIBLOCK_PARAM_LEN+1];
};

/* Per-request driver data: the read worker and its scatter-gather list */
struct ubiblock_pdu {
	struct work_struct work;
	struct ubi_sgl usgl;
};

/* Numbers of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* MTD devices specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

/* One read-only block device layered on top of a UBI volume */
struct ubiblock {
	struct ubi_volume_desc *desc;	/* open volume handle; NULL while refcnt == 0 */
	int ubi_num;
	int vol_id;
	int refcnt;			/* open count, protected by dev_mutex */
	int leb_size;			/* usable LEB size, for sector -> LEB mapping */
	struct gendisk *gd;
	struct request_queue *rq;
	struct workqueue_struct *wq;	/* per-volume workqueue servicing reads */
	struct mutex dev_mutex;
	struct list_head list;		/* link in ubiblock_devices */
	struct blk_mq_tag_set tag_set;
};

/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_IDR(ubiblock_minor_idr);
/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;	/* block major, dynamically allocated at init */
  83. static int __init ubiblock_set_param(const char *val,
  84. const struct kernel_param *kp)
  85. {
  86. int i, ret;
  87. size_t len;
  88. struct ubiblock_param *param;
  89. char buf[UBIBLOCK_PARAM_LEN];
  90. char *pbuf = &buf[0];
  91. char *tokens[UBIBLOCK_PARAM_COUNT];
  92. if (!val)
  93. return -EINVAL;
  94. len = strnlen(val, UBIBLOCK_PARAM_LEN);
  95. if (len == 0) {
  96. pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
  97. return 0;
  98. }
  99. if (len == UBIBLOCK_PARAM_LEN) {
  100. pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
  101. val, UBIBLOCK_PARAM_LEN);
  102. return -EINVAL;
  103. }
  104. strcpy(buf, val);
  105. /* Get rid of the final newline */
  106. if (buf[len - 1] == '\n')
  107. buf[len - 1] = '\0';
  108. for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
  109. tokens[i] = strsep(&pbuf, ",");
  110. param = &ubiblock_param[ubiblock_devs];
  111. if (tokens[1]) {
  112. /* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
  113. ret = kstrtoint(tokens[0], 10, &param->ubi_num);
  114. if (ret < 0)
  115. return -EINVAL;
  116. /* Second param can be a number or a name */
  117. ret = kstrtoint(tokens[1], 10, &param->vol_id);
  118. if (ret < 0) {
  119. param->vol_id = -1;
  120. strcpy(param->name, tokens[1]);
  121. }
  122. } else {
  123. /* One parameter: must be device path */
  124. strcpy(param->name, tokens[0]);
  125. param->ubi_num = -1;
  126. param->vol_id = -1;
  127. }
  128. ubiblock_devs++;
  129. return 0;
  130. }
/* Hook the custom parser above into the 'block=' module parameter */
static const struct kernel_param_ops ubiblock_param_ops = {
	.set    = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
			"Multiple \"block\" parameters may be specified.\n"
			"UBI volumes may be specified by their number, name, or path to the device node.\n"
			"Examples\n"
			"Using the UBI volume path:\n"
			"ubi.block=/dev/ubi0_0\n"
			"Using the UBI device, and the volume name:\n"
			"ubi.block=0,rootfs\n"
			"Using both UBI device number and UBI volume number:\n"
			"ubi.block=0,0\n");
  145. static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
  146. {
  147. struct ubiblock *dev;
  148. list_for_each_entry(dev, &ubiblock_devices, list)
  149. if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
  150. return dev;
  151. return NULL;
  152. }
/*
 * Service one read request, walking it LEB by LEB.
 *
 * The linear block-device byte offset is split into a (LEB, offset)
 * pair. UBI can only read within a single LEB per call, so a request
 * that crosses an LEB boundary is serviced by consecutive
 * ubi_read_sg() calls, each following LEB being read from offset 0.
 *
 * Returns 0 on success or the negative error from ubi_read_sg().
 */
static int ubiblock_read(struct ubiblock_pdu *pdu)
{
	int ret, leb, offset, bytes_left, to_read;
	u64 pos;
	struct request *req = blk_mq_rq_from_pdu(pdu);
	struct ubiblock *dev = req->q->queuedata;

	to_read = blk_rq_bytes(req);
	pos = blk_rq_pos(req) << 9;	/* 512-byte sectors -> bytes */

	/* Get LEB:offset address to read from */
	offset = do_div(pos, dev->leb_size);	/* do_div() leaves quotient in pos */
	leb = pos;
	bytes_left = to_read;

	while (bytes_left) {
		/*
		 * We can only read one LEB at a time. Therefore if the read
		 * length is larger than one LEB size, we split the operation.
		 */
		if (offset + to_read > dev->leb_size)
			to_read = dev->leb_size - offset;

		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
		if (ret < 0)
			return ret;

		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;	/* every subsequent LEB is read from its start */
	}
	return 0;
}
/*
 * Open the block device. The first opener takes a read-only reference
 * on the underlying UBI volume; later opens just bump the open count.
 * Serialized by dev->dev_mutex.
 */
static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
	struct ubiblock *dev = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		/*
		 * The volume is already open, just increase the reference
		 * counter.
		 *
		 * NOTE(review): this path skips the FMODE_WRITE check below,
		 * so a second opener asking for write mode is not refused
		 * here; writes are still rejected at the request level
		 * (ubiblock_queue_rq). Confirm this asymmetry is intended.
		 */
		goto out_done;
	}

	/*
	 * We want users to be aware they should only mount us as read-only.
	 * It's just a paranoid check, as write requests will get rejected
	 * in any case.
	 */
	if (mode & FMODE_WRITE) {
		ret = -EROFS;
		goto out_unlock;
	}

	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
	if (IS_ERR(dev->desc)) {
		dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
			dev->ubi_num, dev->vol_id);
		ret = PTR_ERR(dev->desc);
		dev->desc = NULL;
		goto out_unlock;
	}

out_done:
	dev->refcnt++;
	mutex_unlock(&dev->dev_mutex);
	return 0;

out_unlock:
	mutex_unlock(&dev->dev_mutex);
	return ret;
}
  219. static void ubiblock_release(struct gendisk *gd, fmode_t mode)
  220. {
  221. struct ubiblock *dev = gd->private_data;
  222. mutex_lock(&dev->dev_mutex);
  223. dev->refcnt--;
  224. if (dev->refcnt == 0) {
  225. ubi_close_volume(dev->desc);
  226. dev->desc = NULL;
  227. }
  228. mutex_unlock(&dev->dev_mutex);
  229. }
  230. static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  231. {
  232. /* Some tools might require this information */
  233. geo->heads = 1;
  234. geo->cylinders = 1;
  235. geo->sectors = get_capacity(bdev->bd_disk);
  236. geo->start = 0;
  237. return 0;
  238. }
/* block_device_operations: open/release manage the UBI volume refcount */
static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo	= ubiblock_getgeo,
};
/*
 * Workqueue handler: perform the actual UBI read for one request.
 *
 * Runs in process context (UBI reads may sleep). Maps the request's
 * bio segments into the pdu's scatter-gather list, reads from the
 * volume, flushes the data cache for the touched pages and completes
 * the request with the read's status.
 */
static void ubiblock_do_work(struct work_struct *work)
{
	int ret;
	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
	struct request *req = blk_mq_rq_from_pdu(pdu);
	struct req_iterator iter;
	struct bio_vec bvec;

	blk_mq_start_request(req);

	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	ret = ubiblock_read(pdu);

	/* Keep caches coherent for mappings of the pages we just filled */
	rq_for_each_segment(bvec, req, iter)
		flush_dcache_page(bvec.bv_page);

	blk_mq_end_request(req, errno_to_blk_status(ret));
}
  264. static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
  265. const struct blk_mq_queue_data *bd)
  266. {
  267. struct request *req = bd->rq;
  268. struct ubiblock *dev = hctx->queue->queuedata;
  269. struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
  270. switch (req_op(req)) {
  271. case REQ_OP_READ:
  272. ubi_sgl_init(&pdu->usgl);
  273. queue_work(dev->wq, &pdu->work);
  274. return BLK_STS_OK;
  275. default:
  276. return BLK_STS_IOERR;
  277. }
  278. }
  279. static int ubiblock_init_request(struct blk_mq_tag_set *set,
  280. struct request *req, unsigned int hctx_idx,
  281. unsigned int numa_node)
  282. {
  283. struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
  284. sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
  285. INIT_WORK(&pdu->work, ubiblock_do_work);
  286. return 0;
  287. }
/* blk-mq operations for every ubiblock request queue */
static const struct blk_mq_ops ubiblock_mq_ops = {
	.queue_rq       = ubiblock_queue_rq,
	.init_request   = ubiblock_init_request,
};
  292. static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
  293. {
  294. u64 size = vi->used_bytes >> 9;
  295. if (vi->used_bytes % 512) {
  296. pr_warn("UBI: block: volume size is not a multiple of 512, "
  297. "last %llu bytes are ignored!\n",
  298. vi->used_bytes - (size << 9));
  299. }
  300. if ((sector_t)size != size)
  301. return -EFBIG;
  302. *disk_capacity = size;
  303. return 0;
  304. }
  305. int ubiblock_create(struct ubi_volume_info *vi)
  306. {
  307. struct ubiblock *dev;
  308. struct gendisk *gd;
  309. u64 disk_capacity;
  310. int ret;
  311. ret = calc_disk_capacity(vi, &disk_capacity);
  312. if (ret) {
  313. return ret;
  314. }
  315. /* Check that the volume isn't already handled */
  316. mutex_lock(&devices_mutex);
  317. if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
  318. ret = -EEXIST;
  319. goto out_unlock;
  320. }
  321. dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
  322. if (!dev) {
  323. ret = -ENOMEM;
  324. goto out_unlock;
  325. }
  326. mutex_init(&dev->dev_mutex);
  327. dev->ubi_num = vi->ubi_num;
  328. dev->vol_id = vi->vol_id;
  329. dev->leb_size = vi->usable_leb_size;
  330. dev->tag_set.ops = &ubiblock_mq_ops;
  331. dev->tag_set.queue_depth = 64;
  332. dev->tag_set.numa_node = NUMA_NO_NODE;
  333. dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
  334. dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
  335. dev->tag_set.driver_data = dev;
  336. dev->tag_set.nr_hw_queues = 1;
  337. ret = blk_mq_alloc_tag_set(&dev->tag_set);
  338. if (ret) {
  339. dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
  340. goto out_free_dev;
  341. }
  342. /* Initialize the gendisk of this ubiblock device */
  343. gd = blk_mq_alloc_disk(&dev->tag_set, dev);
  344. if (IS_ERR(gd)) {
  345. ret = PTR_ERR(gd);
  346. goto out_free_tags;
  347. }
  348. gd->fops = &ubiblock_ops;
  349. gd->major = ubiblock_major;
  350. gd->minors = 1;
  351. gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
  352. if (gd->first_minor < 0) {
  353. dev_err(disk_to_dev(gd),
  354. "block: dynamic minor allocation failed");
  355. ret = -ENODEV;
  356. goto out_cleanup_disk;
  357. }
  358. gd->flags |= GENHD_FL_NO_PART;
  359. gd->private_data = dev;
  360. sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
  361. set_capacity(gd, disk_capacity);
  362. dev->gd = gd;
  363. dev->rq = gd->queue;
  364. blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
  365. /*
  366. * Create one workqueue per volume (per registered block device).
  367. * Remember workqueues are cheap, they're not threads.
  368. */
  369. dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
  370. if (!dev->wq) {
  371. ret = -ENOMEM;
  372. goto out_remove_minor;
  373. }
  374. list_add_tail(&dev->list, &ubiblock_devices);
  375. /* Must be the last step: anyone can call file ops from now on */
  376. ret = add_disk(dev->gd);
  377. if (ret)
  378. goto out_destroy_wq;
  379. dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
  380. dev->ubi_num, dev->vol_id, vi->name);
  381. mutex_unlock(&devices_mutex);
  382. return 0;
  383. out_destroy_wq:
  384. list_del(&dev->list);
  385. destroy_workqueue(dev->wq);
  386. out_remove_minor:
  387. idr_remove(&ubiblock_minor_idr, gd->first_minor);
  388. out_cleanup_disk:
  389. put_disk(dev->gd);
  390. out_free_tags:
  391. blk_mq_free_tag_set(&dev->tag_set);
  392. out_free_dev:
  393. kfree(dev);
  394. out_unlock:
  395. mutex_unlock(&devices_mutex);
  396. return ret;
  397. }
/*
 * Tear down the block-device side of a ubiblock instance.
 *
 * Order matters: delete the gendisk first so no new requests arrive,
 * then drain in-flight reads by destroying the workqueue, and only
 * then release the disk, tag set and minor number.
 *
 * Caller holds devices_mutex and has already unlinked @dev from the
 * device list; freeing @dev itself is left to the caller.
 */
static void ubiblock_cleanup(struct ubiblock *dev)
{
	/* Stop new requests to arrive */
	del_gendisk(dev->gd);
	/* Flush pending work */
	destroy_workqueue(dev->wq);
	/* Finally destroy the blk queue */
	dev_info(disk_to_dev(dev->gd), "released");
	put_disk(dev->gd);
	blk_mq_free_tag_set(&dev->tag_set);
	idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
}
/*
 * Detach and destroy the block device for volume @vi, if one exists.
 *
 * Returns -ENODEV when the volume has no block device and -EBUSY while
 * the device is still open. Lock order is devices_mutex (list/idr
 * protection) then dev->dev_mutex (open-count check); the descriptor
 * is freed only after both locks are released.
 */
int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	int ret;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		ret = -ENODEV;
		goto out_unlock;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		ret = -EBUSY;
		goto out_unlock_dev;
	}

	/* Remove from device list */
	list_del(&dev->list);
	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);

	kfree(dev);
	return 0;

out_unlock_dev:
	mutex_unlock(&dev->dev_mutex);
out_unlock:
	mutex_unlock(&devices_mutex);
	return ret;
}
/*
 * Propagate a volume size change to the block device's capacity.
 *
 * Returns -ENODEV when the volume has no block device, -EFBIG when the
 * new size does not fit in sector_t, 0 otherwise (including the no-op
 * case where the capacity is unchanged).
 */
static int ubiblock_resize(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	u64 disk_capacity;
	int ret;

	/*
	 * Need to lock the device list until we stop using the device,
	 * otherwise the device struct might get released in
	 * 'ubiblock_remove()'.
	 */
	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret) {
		mutex_unlock(&devices_mutex);
		if (ret == -EFBIG) {
			dev_warn(disk_to_dev(dev->gd),
				 "the volume is too big (%d LEBs), cannot resize",
				 vi->size);
		}
		return ret;
	}

	mutex_lock(&dev->dev_mutex);

	/* Only touch the gendisk when the capacity actually changed */
	if (get_capacity(dev->gd) != disk_capacity) {
		set_capacity(dev->gd, disk_capacity);
		dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
			 vi->used_bytes);
	}
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);
	return 0;
}
  475. static int ubiblock_notify(struct notifier_block *nb,
  476. unsigned long notification_type, void *ns_ptr)
  477. {
  478. struct ubi_notification *nt = ns_ptr;
  479. switch (notification_type) {
  480. case UBI_VOLUME_ADDED:
  481. /*
  482. * We want to enforce explicit block device creation for
  483. * volumes, so when a volume is added we do nothing.
  484. */
  485. break;
  486. case UBI_VOLUME_REMOVED:
  487. ubiblock_remove(&nt->vi);
  488. break;
  489. case UBI_VOLUME_RESIZED:
  490. ubiblock_resize(&nt->vi);
  491. break;
  492. case UBI_VOLUME_UPDATED:
  493. /*
  494. * If the volume is static, a content update might mean the
  495. * size (i.e. used_bytes) was also changed.
  496. */
  497. if (nt->vi.vol_type == UBI_STATIC_VOLUME)
  498. ubiblock_resize(&nt->vi);
  499. break;
  500. default:
  501. break;
  502. }
  503. return NOTIFY_OK;
  504. }
/* Registered with UBI in ubiblock_init() to receive volume events */
static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};
  508. static struct ubi_volume_desc * __init
  509. open_volume_desc(const char *name, int ubi_num, int vol_id)
  510. {
  511. if (ubi_num == -1)
  512. /* No ubi num, name must be a vol device path */
  513. return ubi_open_volume_path(name, UBI_READONLY);
  514. else if (vol_id == -1)
  515. /* No vol_id, must be vol_name */
  516. return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
  517. else
  518. return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
  519. }
  520. static void __init ubiblock_create_from_param(void)
  521. {
  522. int i, ret = 0;
  523. struct ubiblock_param *p;
  524. struct ubi_volume_desc *desc;
  525. struct ubi_volume_info vi;
  526. /*
  527. * If there is an error creating one of the ubiblocks, continue on to
  528. * create the following ubiblocks. This helps in a circumstance where
  529. * the kernel command-line specifies multiple block devices and some
  530. * may be broken, but we still want the working ones to come up.
  531. */
  532. for (i = 0; i < ubiblock_devs; i++) {
  533. p = &ubiblock_param[i];
  534. desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
  535. if (IS_ERR(desc)) {
  536. pr_err(
  537. "UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
  538. p->ubi_num, p->vol_id, PTR_ERR(desc));
  539. continue;
  540. }
  541. ubi_get_volume_info(desc, &vi);
  542. ubi_close_volume(desc);
  543. ret = ubiblock_create(&vi);
  544. if (ret) {
  545. pr_err(
  546. "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
  547. vi.name, p->ubi_num, p->vol_id, ret);
  548. continue;
  549. }
  550. }
  551. }
  552. static void ubiblock_remove_all(void)
  553. {
  554. struct ubiblock *next;
  555. struct ubiblock *dev;
  556. mutex_lock(&devices_mutex);
  557. list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
  558. /* The module is being forcefully removed */
  559. WARN_ON(dev->desc);
  560. /* Remove from device list */
  561. list_del(&dev->list);
  562. ubiblock_cleanup(dev);
  563. kfree(dev);
  564. }
  565. mutex_unlock(&devices_mutex);
  566. }
  567. int __init ubiblock_init(void)
  568. {
  569. int ret;
  570. ubiblock_major = register_blkdev(0, "ubiblock");
  571. if (ubiblock_major < 0)
  572. return ubiblock_major;
  573. /*
  574. * Attach block devices from 'block=' module param.
  575. * Even if one block device in the param list fails to come up,
  576. * still allow the module to load and leave any others up.
  577. */
  578. ubiblock_create_from_param();
  579. /*
  580. * Block devices are only created upon user requests, so we ignore
  581. * existing volumes.
  582. */
  583. ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
  584. if (ret)
  585. goto err_unreg;
  586. return 0;
  587. err_unreg:
  588. unregister_blkdev(ubiblock_major, "ubiblock");
  589. ubiblock_remove_all();
  590. return ret;
  591. }
/*
 * Module teardown: stop reacting to volume events first so no new
 * devices appear, then destroy the remaining ones and release the
 * block major.
 */
void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}