// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
        ZONE_COND_NAME(NOT_WP),
        ZONE_COND_NAME(EMPTY),
        ZONE_COND_NAME(IMP_OPEN),
        ZONE_COND_NAME(EXP_OPEN),
        ZONE_COND_NAME(CLOSED),
        ZONE_COND_NAME(READONLY),
        ZONE_COND_NAME(FULL),
        ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME

/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralized block layer function to convert BLK_ZONE_COND_XXX
 * into string format. Useful for debugging and tracing zone conditions. For
 * an invalid BLK_ZONE_COND_XXX, the string "UNKNOWN" is returned.
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
        static const char *zone_cond_str = "UNKNOWN";

        if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
                zone_cond_str = zone_cond_name[zone_cond];

        return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
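
/*
 * Example (illustrative sketch, not part of this file): a report_zones_cb
 * callback could use blk_zone_cond_str() to pretty-print the condition of
 * each reported zone. The callback name is hypothetical.
 *
 *      static int print_zone_cb(struct blk_zone *zone, unsigned int idx,
 *                               void *data)
 *      {
 *              pr_info("zone %u: start %llu cond %s\n", idx,
 *                      (unsigned long long)zone->start,
 *                      blk_zone_cond_str(zone->cond));
 *              return 0;
 *      }
 */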

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
        if (blk_rq_is_passthrough(rq))
                return false;

        if (!rq->q->disk->seq_zones_wlock)
                return false;

        if (bdev_op_is_zoned_write(rq->q->disk->part0, req_op(rq)))
                return blk_rq_zone_is_seq(rq);

        return false;
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

bool blk_req_zone_write_trylock(struct request *rq)
{
        unsigned int zno = blk_rq_zone_no(rq);

        if (test_and_set_bit(zno, rq->q->disk->seq_zones_wlock))
                return false;

        WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
        rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;

        return true;
}
EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);

void __blk_req_zone_write_lock(struct request *rq)
{
        if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
                                          rq->q->disk->seq_zones_wlock)))
                return;

        WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
        rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
        rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
        if (rq->q->disk->seq_zones_wlock)
                WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
                                                 rq->q->disk->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
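
/*
 * Example (illustrative sketch): a blk-mq driver dispatch path would
 * typically bracket a sequential zone write with these helpers; the
 * blk_req_zone_write_lock()/blk_req_zone_write_unlock() wrappers in
 * blk-mq.h call the __ variants above. mydrv_dispatch() is hypothetical.
 *
 *      blk_status_t ret;
 *
 *      if (blk_req_needs_zone_write_lock(rq))
 *              blk_req_zone_write_lock(rq);
 *      ret = mydrv_dispatch(rq);
 *      if (ret != BLK_STS_OK)
 *              blk_req_zone_write_unlock(rq);
 */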

/**
 * bdev_nr_zones - Get number of zones
 * @bdev: Target device
 *
 * Return the total number of zones of a zoned block device. For a block
 * device without zone capabilities, the number of zones is always 0.
 */
unsigned int bdev_nr_zones(struct block_device *bdev)
{
        sector_t zone_sectors = bdev_zone_sectors(bdev);

        if (!bdev_is_zoned(bdev))
                return 0;

        return (bdev_nr_sectors(bdev) + zone_sectors - 1) >>
                ilog2(zone_sectors);
}
EXPORT_SYMBOL_GPL(bdev_nr_zones);
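
/*
 * Example (illustrative sketch): sizing per-zone state in a driver or
 * filesystem; "zinfo" is a hypothetical array.
 *
 *      unsigned int nr_zones = bdev_nr_zones(bdev);
 *
 *      if (!nr_zones)
 *              return -EINVAL;
 *      zinfo = kvcalloc(nr_zones, sizeof(*zinfo), GFP_KERNEL);
 *      if (!zinfo)
 *              return -ENOMEM;
 */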

/**
 * blkdev_report_zones - Get zones information
 * @bdev: Target block device
 * @sector: Sector from which to report zones
 * @nr_zones: Maximum number of zones to report
 * @cb: Callback function called for each reported zone
 * @data: Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at
 *    most @nr_zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
                        unsigned int nr_zones, report_zones_cb cb, void *data)
{
        struct gendisk *disk = bdev->bd_disk;
        sector_t capacity = get_capacity(disk);

        if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
                return -EOPNOTSUPP;

        if (!nr_zones || sector >= capacity)
                return 0;

        return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
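
/*
 * Example (illustrative sketch): counting the sequential zones of a device
 * with a callback, using memalloc_noio_save/restore() as required by the
 * note above. count_seq_cb and nr_seq are hypothetical.
 *
 *      static int count_seq_cb(struct blk_zone *zone, unsigned int idx,
 *                              void *data)
 *      {
 *              unsigned int *nr_seq = data;
 *
 *              if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
 *                      (*nr_seq)++;
 *              return 0;
 *      }
 *
 *      noio_flag = memalloc_noio_save();
 *      ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, count_seq_cb,
 *                                &nr_seq);
 *      memalloc_noio_restore(noio_flag);
 */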

static inline unsigned long *blk_alloc_zone_bitmap(int node,
                                                   unsigned int nr_zones)
{
        return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
                            GFP_NOIO, node);
}

static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
                                  void *data)
{
        /*
         * For an all-zones reset, ignore conventional, empty, read-only
         * and offline zones.
         */
        switch (zone->cond) {
        case BLK_ZONE_COND_NOT_WP:
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_READONLY:
        case BLK_ZONE_COND_OFFLINE:
                return 0;
        default:
                set_bit(idx, (unsigned long *)data);
                return 0;
        }
}

static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
                                          gfp_t gfp_mask)
{
        struct gendisk *disk = bdev->bd_disk;
        sector_t capacity = bdev_nr_sectors(bdev);
        sector_t zone_sectors = bdev_zone_sectors(bdev);
        unsigned long *need_reset;
        struct bio *bio = NULL;
        sector_t sector = 0;
        int ret;

        need_reset = blk_alloc_zone_bitmap(disk->queue->node, disk->nr_zones);
        if (!need_reset)
                return -ENOMEM;

        ret = disk->fops->report_zones(disk, 0, disk->nr_zones,
                                       blk_zone_need_reset_cb, need_reset);
        if (ret < 0)
                goto out_free_need_reset;

        ret = 0;
        while (sector < capacity) {
                if (!test_bit(disk_zone_no(disk, sector), need_reset)) {
                        sector += zone_sectors;
                        continue;
                }

                bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
                                   gfp_mask);
                bio->bi_iter.bi_sector = sector;
                sector += zone_sectors;

                /* This may take a while, so be nice to others */
                cond_resched();
        }

        if (bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }

out_free_need_reset:
        kfree(need_reset);
        return ret;
}

static int blkdev_zone_reset_all(struct block_device *bdev, gfp_t gfp_mask)
{
        struct bio bio;

        bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
        return submit_bio_wait(&bio);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev: Target block device
 * @op: Operation to be performed on the zones
 * @sector: Start sector of the first zone to operate on
 * @nr_sectors: Number of sectors, should be at least the length of one zone and
 *              must be zone size aligned.
 * @gfp_mask: Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
                     sector_t sector, sector_t nr_sectors, gfp_t gfp_mask)
{
        struct request_queue *q = bdev_get_queue(bdev);
        sector_t zone_sectors = bdev_zone_sectors(bdev);
        sector_t capacity = bdev_nr_sectors(bdev);
        sector_t end_sector = sector + nr_sectors;
        struct bio *bio = NULL;
        int ret = 0;

        if (!bdev_is_zoned(bdev))
                return -EOPNOTSUPP;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (!op_is_zone_mgmt(op))
                return -EOPNOTSUPP;

        if (end_sector <= sector || end_sector > capacity)
                /* Out of range */
                return -EINVAL;

        /* Check alignment (handle eventual smaller last zone) */
        if (sector & (zone_sectors - 1))
                return -EINVAL;

        if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
                return -EINVAL;

        /*
         * In the case of a zone reset operation over all zones,
         * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this
         * command. For other devices, we emulate this command behavior by
         * identifying the zones needing a reset.
         */
        if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
                if (!blk_queue_zone_resetall(q))
                        return blkdev_zone_reset_all_emulated(bdev, gfp_mask);
                return blkdev_zone_reset_all(bdev, gfp_mask);
        }

        while (sector < end_sector) {
                bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                sector += zone_sectors;

                /* This may take a while, so be nice to others */
                cond_resched();
        }

        ret = submit_bio_wait(bio);
        bio_put(bio);

        return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
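
/*
 * Example (illustrative sketch): resetting the single zone containing
 * "sector". Zone sizes are a power of two (enforced by zone revalidation),
 * so the zone start sector can be obtained by masking.
 *
 *      sector_t zone_sectors = bdev_zone_sectors(bdev);
 *      sector_t zone_start = sector & ~(zone_sectors - 1);
 *      int ret;
 *
 *      ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zone_start,
 *                             zone_sectors, GFP_NOFS);
 */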

struct zone_report_args {
        struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
                                    void *data)
{
        struct zone_report_args *args = data;

        if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
                return -EFAULT;
        return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
                              unsigned int cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        struct zone_report_args args;
        struct request_queue *q;
        struct blk_zone_report rep;
        int ret;

        if (!argp)
                return -EINVAL;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        if (!bdev_is_zoned(bdev))
                return -ENOTTY;

        if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
                return -EFAULT;

        if (!rep.nr_zones)
                return -EINVAL;

        args.zones = argp + sizeof(struct blk_zone_report);
        ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
                                  blkdev_copy_zone_to_user, &args);
        if (ret < 0)
                return ret;

        rep.nr_zones = ret;
        rep.flags = BLK_ZONE_REP_CAPACITY;
        if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
                return -EFAULT;
        return 0;
}
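
/*
 * Example (illustrative sketch, userspace side): a BLKREPORTZONE call
 * matching the layout this handler expects, i.e. a struct blk_zone_report
 * header immediately followed by the zones array. Error handling is
 * abbreviated. On return, rep->nr_zones holds the number of zones
 * actually reported.
 *
 *      struct blk_zone_report *rep;
 *      unsigned int nr = 16;
 *
 *      rep = calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
 *      rep->sector = 0;
 *      rep->nr_zones = nr;
 *      if (ioctl(fd, BLKREPORTZONE, rep) < 0)
 *              return -1;
 */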

static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
                                      const struct blk_zone_range *zrange)
{
        loff_t start, end;

        if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
            zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
                /* Out of range */
                return -EINVAL;

        start = zrange->sector << SECTOR_SHIFT;
        end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

        return truncate_bdev_range(bdev, mode, start, end);
}

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
                           unsigned int cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        struct request_queue *q;
        struct blk_zone_range zrange;
        enum req_op op;
        int ret;

        if (!argp)
                return -EINVAL;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        if (!bdev_is_zoned(bdev))
                return -ENOTTY;

        if (!(mode & FMODE_WRITE))
                return -EBADF;

        if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
                return -EFAULT;

        switch (cmd) {
        case BLKRESETZONE:
                op = REQ_OP_ZONE_RESET;

                /* Invalidate the page cache, including dirty pages. */
                filemap_invalidate_lock(bdev->bd_inode->i_mapping);
                ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
                if (ret)
                        goto fail;
                break;
        case BLKOPENZONE:
                op = REQ_OP_ZONE_OPEN;
                break;
        case BLKCLOSEZONE:
                op = REQ_OP_ZONE_CLOSE;
                break;
        case BLKFINISHZONE:
                op = REQ_OP_ZONE_FINISH;
                break;
        default:
                return -ENOTTY;
        }

        ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
                               GFP_KERNEL);

fail:
        if (cmd == BLKRESETZONE)
                filemap_invalidate_unlock(bdev->bd_inode->i_mapping);

        return ret;
}
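
/*
 * Example (illustrative sketch, userspace side): resetting one zone with
 * BLKRESETZONE on a file descriptor opened with write access, matching
 * the FMODE_WRITE check above.
 *
 *      struct blk_zone_range zrange = {
 *              .sector         = zone_start,
 *              .nr_sectors     = zone_sectors,
 *      };
 *
 *      if (ioctl(fd, BLKRESETZONE, &zrange) < 0)
 *              return -1;
 */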

void disk_free_zone_bitmaps(struct gendisk *disk)
{
        kfree(disk->conv_zones_bitmap);
        disk->conv_zones_bitmap = NULL;
        kfree(disk->seq_zones_wlock);
        disk->seq_zones_wlock = NULL;
}

struct blk_revalidate_zone_args {
        struct gendisk  *disk;
        unsigned long   *conv_zones_bitmap;
        unsigned long   *seq_zones_wlock;
        unsigned int    nr_zones;
        sector_t        zone_sectors;
        sector_t        sector;
};

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
                                  void *data)
{
        struct blk_revalidate_zone_args *args = data;
        struct gendisk *disk = args->disk;
        struct request_queue *q = disk->queue;
        sector_t capacity = get_capacity(disk);

        /*
         * All zones must have the same size, with the exception of an
         * eventual smaller last zone.
         */
        if (zone->start == 0) {
                if (zone->len == 0 || !is_power_of_2(zone->len)) {
                        pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
                                disk->disk_name, zone->len);
                        return -ENODEV;
                }

                args->zone_sectors = zone->len;
                args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
        } else if (zone->start + args->zone_sectors < capacity) {
                if (zone->len != args->zone_sectors) {
                        pr_warn("%s: Invalid zoned device with non constant zone size\n",
                                disk->disk_name);
                        return -ENODEV;
                }
        } else {
                if (zone->len > args->zone_sectors) {
                        pr_warn("%s: Invalid zoned device with larger last zone size\n",
                                disk->disk_name);
                        return -ENODEV;
                }
        }

        /* Check for holes in the zone report */
        if (zone->start != args->sector) {
                pr_warn("%s: Zone gap at sectors %llu..%llu\n",
                        disk->disk_name, args->sector, zone->start);
                return -ENODEV;
        }

        /* Check zone type */
        switch (zone->type) {
        case BLK_ZONE_TYPE_CONVENTIONAL:
                if (!args->conv_zones_bitmap) {
                        args->conv_zones_bitmap =
                                blk_alloc_zone_bitmap(q->node, args->nr_zones);
                        if (!args->conv_zones_bitmap)
                                return -ENOMEM;
                }
                set_bit(idx, args->conv_zones_bitmap);
                break;
        case BLK_ZONE_TYPE_SEQWRITE_REQ:
        case BLK_ZONE_TYPE_SEQWRITE_PREF:
                if (!args->seq_zones_wlock) {
                        args->seq_zones_wlock =
                                blk_alloc_zone_bitmap(q->node, args->nr_zones);
                        if (!args->seq_zones_wlock)
                                return -ENOMEM;
                }
                break;
        default:
                pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
                        disk->disk_name, (int)zone->type, zone->start);
                return -ENODEV;
        }

        args->sector += zone->len;
        return 0;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk: Target disk
 * @update_driver_data: Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * a disk's zone bitmaps. This function should normally be called within the
 * disk ->revalidate method for blk-mq based drivers. For BIO based drivers,
 * only disk->nr_zones needs to be updated so that the sysfs exposed value
 * is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
                              void (*update_driver_data)(struct gendisk *disk))
{
        struct request_queue *q = disk->queue;
        struct blk_revalidate_zone_args args = {
                .disk           = disk,
        };
        unsigned int noio_flag;
        int ret;

        if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
                return -EIO;
        if (WARN_ON_ONCE(!queue_is_mq(q)))
                return -EIO;

        if (!get_capacity(disk))
                return -EIO;

        /*
         * Ensure that all memory allocations in this context are done as if
         * GFP_NOIO was specified.
         */
        noio_flag = memalloc_noio_save();
        ret = disk->fops->report_zones(disk, 0, UINT_MAX,
                                       blk_revalidate_zone_cb, &args);
        if (!ret) {
                pr_warn("%s: No zones reported\n", disk->disk_name);
                ret = -ENODEV;
        }
        memalloc_noio_restore(noio_flag);

        /*
         * If zones were reported, make sure that the entire disk capacity
         * has been checked.
         */
        if (ret > 0 && args.sector != get_capacity(disk)) {
                pr_warn("%s: Missing zones from sector %llu\n",
                        disk->disk_name, args.sector);
                ret = -ENODEV;
        }

        /*
         * Install the new bitmaps and update nr_zones only once the queue is
         * stopped and all I/Os are completed (i.e. a scheduler is not
         * referencing the bitmaps).
         */
        blk_mq_freeze_queue(q);
        if (ret > 0) {
                blk_queue_chunk_sectors(q, args.zone_sectors);
                disk->nr_zones = args.nr_zones;
                swap(disk->seq_zones_wlock, args.seq_zones_wlock);
                swap(disk->conv_zones_bitmap, args.conv_zones_bitmap);
                if (update_driver_data)
                        update_driver_data(disk);
                ret = 0;
        } else {
                pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
                disk_free_zone_bitmaps(disk);
        }
        blk_mq_unfreeze_queue(q);

        kfree(args.seq_zones_wlock);
        kfree(args.conv_zones_bitmap);
        return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
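
/*
 * Example (illustrative sketch): a blk-mq driver would typically call
 * blk_revalidate_disk_zones() after (re)reading the device geometry,
 * passing an optional callback run while the queue is frozen. The driver
 * names below are hypothetical.
 *
 *      static void mydrv_update_zone_info(struct gendisk *disk)
 *      {
 *              struct mydrv *drv = disk->private_data;
 *
 *              drv->zone_info_valid = true;
 *      }
 *
 *      ret = blk_revalidate_disk_zones(disk, mydrv_update_zone_info);
 *      if (ret)
 *              pr_warn("%s: zone revalidation failed (%d)\n",
 *                      disk->disk_name, ret);
 */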

void disk_clear_zone_settings(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        blk_mq_freeze_queue(q);

        disk_free_zone_bitmaps(disk);
        blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
        q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
        disk->nr_zones = 0;
        disk->max_open_zones = 0;
        disk->max_active_zones = 0;
        q->limits.chunk_sectors = 0;
        q->limits.zone_write_granularity = 0;
        q->limits.max_zone_append_sectors = 0;

        blk_mq_unfreeze_queue(q);
}