zns.c

// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe ZNS-ZBD command implementation.
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/nvme.h>
#include <linux/blkdev.h>
#include "nvmet.h"

/*
 * We set the Memory Page Size Minimum (MPSMIN) for the target controller to 0;
 * nvme_enable_ctrl() adds 12 to it, which results in a page_shift value of 12,
 * i.e. a 2^12 = 4k page size. When calculating the ZASL, use a shift of 12.
 */
#define NVMET_MPSMIN_SHIFT	12

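/*
 * Convert a zone append limit given in 512-byte sectors into the ZASL
 * power-of-two encoding. As an illustrative example (values assumed, not
 * from the code): a limit of 1024 sectors (512 KiB) gives
 * ilog2(1024 >> 3) = 7, i.e. 2^7 units of the 4k MPSMIN page.
 */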
static inline u8 nvmet_zasl(unsigned int zone_append_sects)
{
	/*
	 * Zone Append Size Limit (zasl) is expressed as a power of 2 value
	 * with the minimum memory page size (i.e. 12) as unit.
	 */
	return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
}

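/*
 * blkdev_report_zones() callback: fail namespace enablement if any
 * conventional zone is found, since ZNS does not define a conventional
 * zone type.
 */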
static int validate_conv_zones_cb(struct blk_zone *z,
				  unsigned int i, void *data)
{
	if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return -EOPNOTSUPP;
	return 0;
}

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
{
	u8 zasl = nvmet_zasl(bdev_max_zone_append_sectors(ns->bdev));
	struct gendisk *bd_disk = ns->bdev->bd_disk;
	int ret;

	if (ns->subsys->zasl) {
		if (ns->subsys->zasl > zasl)
			return false;
	}
	ns->subsys->zasl = zasl;

	/*
	 * Generic zoned block devices may have a smaller last zone which is
	 * not supported by ZNS. Exclude zoned drives that have such smaller
	 * last zone.
	 */
	if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
		return false;
	/*
	 * ZNS does not define a conventional zone type. If the underlying
	 * device has a bitmap set indicating the existence of conventional
	 * zones, reject the device. Otherwise, use report zones to detect if
	 * the device has conventional zones.
	 */
	if (ns->bdev->bd_disk->conv_zones_bitmap)
		return false;

	ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
				  validate_conv_zones_cb, NULL);
	if (ret < 0)
		return false;

	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	return true;
}

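/*
 * Identify Controller, ZNS command set: report the subsystem-wide ZASL,
 * capped at the transport's MDTS when the transport provides one.
 */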
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req)
{
	u8 zasl = req->sq->ctrl->subsys->zasl;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl_zns *id;
	u16 status;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	if (ctrl->ops->get_mdts)
		id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
	else
		id->zasl = zasl;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

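/*
 * Identify Namespace, ZNS command set. MOR and MAR are 0's based values in
 * the ZNS specification, so a block layer limit of 0 (meaning "no limit") is
 * reported as 0xffffffff.
 */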
void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
{
	struct nvme_id_ns_zns *id_zns = NULL;
	u64 zsze;
	u16 status;
	u32 mar, mor;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL);
	if (!id_zns) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	status = nvmet_req_find_ns(req);
	if (status)
		goto done;

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}

	if (!bdev_is_zoned(req->ns->bdev)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_identify, nsid);
		goto out;
	}

	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
					req->ns->blksize_shift;
	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);

	mor = bdev_max_open_zones(req->ns->bdev);
	if (!mor)
		mor = U32_MAX;
	else
		mor--;
	id_zns->mor = cpu_to_le32(mor);

	mar = bdev_max_active_zones(req->ns->bdev);
	if (!mar)
		mar = U32_MAX;
	else
		mar--;
	id_zns->mar = cpu_to_le32(mar);

done:
	status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
out:
	kfree(id_zns);
	nvmet_req_complete(req, status);
}

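/*
 * Validate a Zone Management Receive command. NUMD is a 0's based dword
 * count, so the output buffer size in bytes is (numd + 1) * 4.
 */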
static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	}

	if (out_bufsize < sizeof(struct nvme_zone_report)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.pr) {
	case 0:
	case 1:
		break;
	default:
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.zrasf) {
	case NVME_ZRASF_ZONE_REPORT_ALL:
	case NVME_ZRASF_ZONE_STATE_EMPTY:
	case NVME_ZRASF_ZONE_STATE_IMP_OPEN:
	case NVME_ZRASF_ZONE_STATE_EXP_OPEN:
	case NVME_ZRASF_ZONE_STATE_CLOSED:
	case NVME_ZRASF_ZONE_STATE_FULL:
	case NVME_ZRASF_ZONE_STATE_READONLY:
	case NVME_ZRASF_ZONE_STATE_OFFLINE:
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

struct nvmet_report_zone_data {
	struct nvmet_req *req;
	u64 out_buf_offset;
	u64 out_nr_zones;
	u64 nr_zones;
	u8 zrasf;
};

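/*
 * blkdev_report_zones() callback: skip zones whose condition does not match
 * the requested ZRASF filter, translate the remaining ones into NVMe zone
 * descriptors and copy them into the host buffer.
 */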
static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
{
	static const unsigned int nvme_zrasf_to_blk_zcond[] = {
		[NVME_ZRASF_ZONE_STATE_EMPTY]	 = BLK_ZONE_COND_EMPTY,
		[NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN,
		[NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN,
		[NVME_ZRASF_ZONE_STATE_CLOSED]	 = BLK_ZONE_COND_CLOSED,
		[NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY,
		[NVME_ZRASF_ZONE_STATE_FULL]	 = BLK_ZONE_COND_FULL,
		[NVME_ZRASF_ZONE_STATE_OFFLINE]	 = BLK_ZONE_COND_OFFLINE,
	};
	struct nvmet_report_zone_data *rz = d;

	if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
	    z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
		return 0;

	if (rz->nr_zones < rz->out_nr_zones) {
		struct nvme_zone_descriptor zdesc = { };
		u16 status;

		zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
		zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
		zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
		zdesc.za = z->reset ? 1 << 2 : 0;
		zdesc.zs = z->cond << 4;
		zdesc.zt = z->type;
		status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
					   sizeof(zdesc));
		if (status)
			return -EINVAL;

		rz->out_buf_offset += sizeof(zdesc);
	}

	rz->nr_zones++;

	return 0;
}

static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
	unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);

	return bdev_nr_zones(req->ns->bdev) -
		(sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
}

static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
{
	if (bufsize <= sizeof(struct nvme_zone_report))
		return 0;

	return (bufsize - sizeof(struct nvme_zone_report)) /
		sizeof(struct nvme_zone_descriptor);
}

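/*
 * Zone Management Receive is executed from the zbd_wq workqueue since
 * blkdev_report_zones() issues I/O to the device and may sleep.
 */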
static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
	__le64 nr_zones;
	u16 status;
	int ret;
	struct nvmet_report_zone_data rz_data = {
		.out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
		/* leave the place for report zone header */
		.out_buf_offset = sizeof(struct nvme_zone_report),
		.zrasf = req->cmd->zmr.zrasf,
		.nr_zones = 0,
		.req = req,
	};

	status = nvmet_bdev_validate_zone_mgmt_recv(req);
	if (status)
		goto out;

	if (!req_slba_nr_zones) {
		status = NVME_SC_SUCCESS;
		goto out;
	}

	ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
				  nvmet_bdev_report_zone_cb, &rz_data);
	if (ret < 0) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/*
	 * When partial bit is set nr_zones must indicate the number of zone
	 * descriptors actually transferred.
	 */
	if (req->cmd->zmr.pr)
		rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);

	nr_zones = cpu_to_le64(rz_data.nr_zones);
	status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

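/*
 * Map an NVMe Zone Send Action to the corresponding block layer zone
 * management operation. REQ_OP_LAST is used as a sentinel for actions that
 * have no block layer equivalent.
 */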
static inline enum req_op zsa_req_op(u8 zsa)
{
	switch (zsa) {
	case NVME_ZONE_OPEN:
		return REQ_OP_ZONE_OPEN;
	case NVME_ZONE_CLOSE:
		return REQ_OP_ZONE_CLOSE;
	case NVME_ZONE_FINISH:
		return REQ_OP_ZONE_FINISH;
	case NVME_ZONE_RESET:
		return REQ_OP_ZONE_RESET;
	default:
		return REQ_OP_LAST;
	}
}

static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
{
	switch (ret) {
	case 0:
		return NVME_SC_SUCCESS;
	case -EINVAL:
	case -EIO:
		return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
	default:
		return NVME_SC_INTERNAL;
	}
}

struct nvmet_zone_mgmt_send_all_data {
	unsigned long *zbitmap;
	struct nvmet_req *req;
};

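/*
 * blkdev_report_zones() callback used for 'select all' emulation: mark a zone
 * in the bitmap only if its current condition allows the requested transition
 * (open: closed zones; close: opened zones; finish: opened or closed zones).
 */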
static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
{
	struct nvmet_zone_mgmt_send_all_data *data = d;

	switch (zsa_req_op(data->req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_OPEN:
		switch (z->cond) {
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_CLOSE:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_FINISH:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	default:
		return -EINVAL;
	}

	set_bit(i, data->zbitmap);

	return 0;
}

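/*
 * Emulate a 'select all' Open/Close/Finish: scan the device to build a bitmap
 * of zones in an eligible condition, then chain one zone management bio per
 * marked zone and wait for the whole chain to complete.
 */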
static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
	struct block_device *bdev = req->ns->bdev;
	unsigned int nr_zones = bdev_nr_zones(bdev);
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;
	struct nvmet_zone_mgmt_send_all_data d = {
		.req = req,
	};

	d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
				 GFP_NOIO, bdev->bd_disk->node_id);
	if (!d.zbitmap) {
		ret = -ENOMEM;
		goto out;
	}

	/* Scan and build bitmap of the eligible zones */
	ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
	if (ret != nr_zones) {
		if (ret > 0)
			ret = -EIO;
		goto out;
	} else {
		/* We scanned all the zones */
		ret = 0;
	}

	while (sector < bdev_nr_sectors(bdev)) {
		if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) {
			bio = blk_next_bio(bio, bdev, 0,
				zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
				GFP_KERNEL);
			bio->bi_iter.bi_sector = sector;
			/* This may take a while, so be nice to others */
			cond_resched();
		}
		sector += bdev_zone_sectors(bdev);
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out:
	kfree(d.zbitmap);

	return blkdev_zone_mgmt_errno_to_nvme_status(ret);
}

static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
{
	int ret;

	switch (zsa_req_op(req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_RESET:
		ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
				       get_capacity(req->ns->bdev->bd_disk),
				       GFP_KERNEL);
		if (ret < 0)
			return blkdev_zone_mgmt_errno_to_nvme_status(ret);
		break;
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return nvmet_bdev_zone_mgmt_emulate_all(req);
	default:
		/* this is needed to quiet compiler warning */
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

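/*
 * Zone Management Send work: validate the zone send action and SLBA (which
 * must lie within the device capacity and be aligned to the start of a zone),
 * then issue the corresponding zone management operation to the block device.
 */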
static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
	enum req_op op = zsa_req_op(req->cmd->zms.zsa);
	struct block_device *bdev = req->ns->bdev;
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	u16 status = NVME_SC_SUCCESS;
	int ret;

	if (op == REQ_OP_LAST) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
		goto out;
	}

	/* when select all bit is set slba field is ignored */
	if (req->cmd->zms.select_all) {
		status = nvmet_bdev_execute_zmgmt_send_all(req);
		goto out;
	}

	if (sect >= get_capacity(bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (zone_sectors - 1)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
	if (ret < 0)
		status = blkdev_zone_mgmt_errno_to_nvme_status(ret);

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

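/*
 * Zone Append completion: on success the block layer returns the sector at
 * which the data was written in bi_sector, which is reported back to the
 * host as the result LBA.
 */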
static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	if (bio->bi_status == BLK_STS_OK) {
		req->cqe->result.u64 =
			nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
	}

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	nvmet_req_bio_put(req, bio);
}

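/*
 * Build a REQ_OP_ZONE_APPEND bio from the request SGL. Per ZNS, the command's
 * ZSLBA must be the lowest LBA of the target zone, hence the zone alignment
 * check below.
 */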
void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
	const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
	u16 status = NVME_SC_SUCCESS;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	struct bio *bio;
	int sg_cnt;

	/* Request is completed on len mismatch in nvmet_check_transfer_len() */
	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->z.inline_bio;
		bio_init(bio, req->ns->bdev, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), opf);
	} else {
		bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);
	}

	bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
	bio->bi_iter.bi_sector = sect;
	bio->bi_private = req;
	if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
		bio->bi_opf |= REQ_FUA;

	for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
		struct page *p = sg_page(sg);
		unsigned int l = sg->length;
		unsigned int o = sg->offset;
		unsigned int ret;

		ret = bio_add_zone_append_page(bio, p, l, o);
		if (ret != sg->length) {
			status = NVME_SC_INTERNAL;
			goto out_put_bio;
		}
		total_len += sg->length;
	}

	if (total_len != nvmet_rw_data_len(req)) {
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		goto out_put_bio;
	}

	submit_bio(bio);
	return;

out_put_bio:
	nvmet_req_bio_put(req, bio);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_zone_append:
		req->execute = nvmet_bdev_execute_zone_append;
		return 0;
	case nvme_cmd_zone_mgmt_recv:
		req->execute = nvmet_bdev_execute_zone_mgmt_recv;
		return 0;
	case nvme_cmd_zone_mgmt_send:
		req->execute = nvmet_bdev_execute_zone_mgmt_send;
		return 0;
	default:
		return nvmet_bdev_parse_io_cmd(req);
	}
}