configfs.c 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Configfs interface for the NVMe target.
  4. * Copyright (c) 2015-2016 HGST, a Western Digital Company.
  5. */
  6. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  7. #include <linux/kernel.h>
  8. #include <linux/module.h>
  9. #include <linux/slab.h>
  10. #include <linux/stat.h>
  11. #include <linux/ctype.h>
  12. #include <linux/pci.h>
  13. #include <linux/pci-p2pdma.h>
  14. #ifdef CONFIG_NVME_TARGET_AUTH
  15. #include <linux/nvme-auth.h>
  16. #endif
  17. #include <crypto/hash.h>
  18. #include <crypto/kpp.h>
  19. #include "nvmet.h"
static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;

/* Maps an NVMe-oF on-the-wire type code to its configfs string name. */
struct nvmet_type_name_map {
	u8		type;
	const char	*name;
};

/* Transport types selectable through the addr_trtype attribute. */
static struct nvmet_type_name_map nvmet_transport[] = {
	{ NVMF_TRTYPE_RDMA,	"rdma" },
	{ NVMF_TRTYPE_FC,	"fc" },
	{ NVMF_TRTYPE_TCP,	"tcp" },
	{ NVMF_TRTYPE_LOOP,	"loop" },
};

/* Address families selectable through the addr_adrfam attribute. */
static const struct nvmet_type_name_map nvmet_addr_family[] = {
	{ NVMF_ADDR_FAMILY_PCI,		"pcie" },
	{ NVMF_ADDR_FAMILY_IP4,		"ipv4" },
	{ NVMF_ADDR_FAMILY_IP6,		"ipv6" },
	{ NVMF_ADDR_FAMILY_IB,		"ib" },
	{ NVMF_ADDR_FAMILY_FC,		"fc" },
	{ NVMF_ADDR_FAMILY_LOOP,	"loop" },
};
  42. static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
  43. {
  44. if (p->enabled)
  45. pr_err("Disable port '%u' before changing attribute in %s\n",
  46. le16_to_cpu(p->disc_addr.portid), caller);
  47. return p->enabled;
  48. }
/*
 * nvmet_port Generic ConfigFS definitions.
 * Used in any place in the ConfigFS tree that refers to an address.
 */
static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
{
	u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
	int i;

	/* Start at 1: entry 0 ("pcie") is intentionally never reported. */
	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
		if (nvmet_addr_family[i].type == adrfam)
			return snprintf(page, PAGE_SIZE, "%s\n",
					nvmet_addr_family[i].name);
	}

	return snprintf(page, PAGE_SIZE, "\n");
}

static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	/* Start at 1: "pcie" cannot be selected from user space. */
	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
		if (sysfs_streq(page, nvmet_addr_family[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for adrfam\n", page);
	return -EINVAL;

found:
	port->disc_addr.adrfam = nvmet_addr_family[i].type;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_adrfam);
  82. static ssize_t nvmet_addr_portid_show(struct config_item *item,
  83. char *page)
  84. {
  85. __le16 portid = to_nvmet_port(item)->disc_addr.portid;
  86. return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
  87. }
  88. static ssize_t nvmet_addr_portid_store(struct config_item *item,
  89. const char *page, size_t count)
  90. {
  91. struct nvmet_port *port = to_nvmet_port(item);
  92. u16 portid = 0;
  93. if (kstrtou16(page, 0, &portid)) {
  94. pr_err("Invalid value '%s' for portid\n", page);
  95. return -EINVAL;
  96. }
  97. if (nvmet_is_port_enabled(port, __func__))
  98. return -EACCES;
  99. port->disc_addr.portid = cpu_to_le16(portid);
  100. return count;
  101. }
  102. CONFIGFS_ATTR(nvmet_, addr_portid);
  103. static ssize_t nvmet_addr_traddr_show(struct config_item *item,
  104. char *page)
  105. {
  106. struct nvmet_port *port = to_nvmet_port(item);
  107. return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
  108. }
  109. static ssize_t nvmet_addr_traddr_store(struct config_item *item,
  110. const char *page, size_t count)
  111. {
  112. struct nvmet_port *port = to_nvmet_port(item);
  113. if (count > NVMF_TRADDR_SIZE) {
  114. pr_err("Invalid value '%s' for traddr\n", page);
  115. return -EINVAL;
  116. }
  117. if (nvmet_is_port_enabled(port, __func__))
  118. return -EACCES;
  119. if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
  120. return -EINVAL;
  121. return count;
  122. }
  123. CONFIGFS_ATTR(nvmet_, addr_traddr);
/* Transport requirements (secure channel) selectable via addr_treq. */
static const struct nvmet_type_name_map nvmet_addr_treq[] = {
	{ NVMF_TREQ_NOT_SPECIFIED,	"not specified" },
	{ NVMF_TREQ_REQUIRED,		"required" },
	{ NVMF_TREQ_NOT_REQUIRED,	"not required" },
};

/* Show only the secure-channel bits of treq; other flag bits are hidden. */
static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
{
	u8 treq = to_nvmet_port(item)->disc_addr.treq &
		NVME_TREQ_SECURE_CHANNEL_MASK;
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (treq == nvmet_addr_treq[i].type)
			return snprintf(page, PAGE_SIZE, "%s\n",
					nvmet_addr_treq[i].name);
	}

	return snprintf(page, PAGE_SIZE, "\n");
}

static ssize_t nvmet_addr_treq_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	/* Preserve any flag bits outside the secure-channel mask. */
	u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (sysfs_streq(page, nvmet_addr_treq[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for treq\n", page);
	return -EINVAL;

found:
	/* Merge the new secure-channel setting into the preserved bits. */
	treq |= nvmet_addr_treq[i].type;
	port->disc_addr.treq = treq;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_treq);
  161. static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
  162. char *page)
  163. {
  164. struct nvmet_port *port = to_nvmet_port(item);
  165. return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
  166. }
  167. static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
  168. const char *page, size_t count)
  169. {
  170. struct nvmet_port *port = to_nvmet_port(item);
  171. if (count > NVMF_TRSVCID_SIZE) {
  172. pr_err("Invalid value '%s' for trsvcid\n", page);
  173. return -EINVAL;
  174. }
  175. if (nvmet_is_port_enabled(port, __func__))
  176. return -EACCES;
  177. if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
  178. return -EINVAL;
  179. return count;
  180. }
  181. CONFIGFS_ATTR(nvmet_, addr_trsvcid);
  182. static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
  183. char *page)
  184. {
  185. struct nvmet_port *port = to_nvmet_port(item);
  186. return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
  187. }
  188. static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
  189. const char *page, size_t count)
  190. {
  191. struct nvmet_port *port = to_nvmet_port(item);
  192. int ret;
  193. if (nvmet_is_port_enabled(port, __func__))
  194. return -EACCES;
  195. ret = kstrtoint(page, 0, &port->inline_data_size);
  196. if (ret) {
  197. pr_err("Invalid value '%s' for inline_data_size\n", page);
  198. return -EINVAL;
  199. }
  200. return count;
  201. }
  202. CONFIGFS_ATTR(nvmet_, param_inline_data_size);
  203. #ifdef CONFIG_BLK_DEV_INTEGRITY
  204. static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
  205. char *page)
  206. {
  207. struct nvmet_port *port = to_nvmet_port(item);
  208. return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
  209. }
  210. static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
  211. const char *page, size_t count)
  212. {
  213. struct nvmet_port *port = to_nvmet_port(item);
  214. bool val;
  215. if (strtobool(page, &val))
  216. return -EINVAL;
  217. if (nvmet_is_port_enabled(port, __func__))
  218. return -EACCES;
  219. port->pi_enable = val;
  220. return count;
  221. }
  222. CONFIGFS_ATTR(nvmet_, param_pi_enable);
  223. #endif
  224. static ssize_t nvmet_addr_trtype_show(struct config_item *item,
  225. char *page)
  226. {
  227. struct nvmet_port *port = to_nvmet_port(item);
  228. int i;
  229. for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
  230. if (port->disc_addr.trtype == nvmet_transport[i].type)
  231. return snprintf(page, PAGE_SIZE,
  232. "%s\n", nvmet_transport[i].name);
  233. }
  234. return sprintf(page, "\n");
  235. }
/* Default transport-specific (TSAS) values advertised for RDMA ports. */
static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
{
	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
}
  242. static ssize_t nvmet_addr_trtype_store(struct config_item *item,
  243. const char *page, size_t count)
  244. {
  245. struct nvmet_port *port = to_nvmet_port(item);
  246. int i;
  247. if (nvmet_is_port_enabled(port, __func__))
  248. return -EACCES;
  249. for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
  250. if (sysfs_streq(page, nvmet_transport[i].name))
  251. goto found;
  252. }
  253. pr_err("Invalid value '%s' for trtype\n", page);
  254. return -EINVAL;
  255. found:
  256. memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
  257. port->disc_addr.trtype = nvmet_transport[i].type;
  258. if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
  259. nvmet_port_init_tsas_rdma(port);
  260. return count;
  261. }
  262. CONFIGFS_ATTR(nvmet_, addr_trtype);
/*
 * Namespace structures & file operation functions below
 */
static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
{
	return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
}

/* Set the backing device/file path; only allowed while the ns is disabled. */
static ssize_t nvmet_ns_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EBUSY;
	if (ns->enabled)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");	/* strip the trailing newline */
	if (!len)
		goto out_unlock;

	kfree(ns->device_path);

	ret = -ENOMEM;
	/* kmemdup_nul() copies the path and NUL-terminates it. */
	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
	if (!ns->device_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, device_path);
#ifdef CONFIG_PCI_P2PDMA
static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
}

/*
 * Configure peer-to-peer memory use for this namespace.  The input is
 * parsed by pci_p2pdma_enable_store() (boolean or a specific PCI device);
 * only allowed while the namespace is disabled.
 */
static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pmem;
	int ret = count;
	int error;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
	if (error) {
		ret = error;
		goto out_unlock;
	}

	ns->use_p2pmem = use_p2pmem;
	/* Drop the reference on any previously selected device. */
	pci_dev_put(ns->p2p_dev);
	ns->p2p_dev = p2p_dev;

out_unlock:
	mutex_unlock(&ns->subsys->lock);

	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, p2pmem);
#endif /* CONFIG_PCI_P2PDMA */
  330. static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
  331. {
  332. return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
  333. }
  334. static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
  335. const char *page, size_t count)
  336. {
  337. struct nvmet_ns *ns = to_nvmet_ns(item);
  338. struct nvmet_subsys *subsys = ns->subsys;
  339. int ret = 0;
  340. mutex_lock(&subsys->lock);
  341. if (ns->enabled) {
  342. ret = -EBUSY;
  343. goto out_unlock;
  344. }
  345. if (uuid_parse(page, &ns->uuid))
  346. ret = -EINVAL;
  347. out_unlock:
  348. mutex_unlock(&subsys->lock);
  349. return ret ? ret : count;
  350. }
  351. CONFIGFS_ATTR(nvmet_ns_, device_uuid);
static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
}

/*
 * Parse a 16-byte NGUID given as 32 hex digits, with an optional '-' or
 * ':' separator allowed after each byte.  Only accepted while the
 * namespace is disabled.
 */
static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	u8 nguid[16];
	const char *p = page;
	int i;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	for (i = 0; i < 16; i++) {
		/* each byte needs two hex digits within the buffer */
		if (p + 2 > page + count) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
			ret = -EINVAL;
			goto out_unlock;
		}

		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
		p += 2;

		/* skip an optional separator between bytes */
		if (*p == '-' || *p == ':')
			p++;
	}

	/* Commit only after the whole string parsed successfully. */
	memcpy(&ns->nguid, nguid, sizeof(nguid));
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_nguid);
static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
}

/*
 * Move the namespace to a different ANA group and notify hosts via an
 * ANA change event.  Group ids are 1-based, bounded by NVMET_MAX_ANAGRPS.
 */
static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	u32 oldgrpid, newgrpid;
	int ret;

	ret = kstrtou32(page, 0, &newgrpid);
	if (ret)
		return ret;

	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
		return -EINVAL;

	down_write(&nvmet_ana_sem);
	oldgrpid = ns->anagrpid;
	/* take the new group's reference before dropping the old one */
	nvmet_ana_group_enabled[newgrpid]++;
	ns->anagrpid = newgrpid;
	nvmet_ana_group_enabled[oldgrpid]--;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);

	nvmet_send_ana_event(ns->subsys, NULL);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
  416. static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
  417. {
  418. return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
  419. }
  420. static ssize_t nvmet_ns_enable_store(struct config_item *item,
  421. const char *page, size_t count)
  422. {
  423. struct nvmet_ns *ns = to_nvmet_ns(item);
  424. bool enable;
  425. int ret = 0;
  426. if (strtobool(page, &enable))
  427. return -EINVAL;
  428. if (enable)
  429. ret = nvmet_ns_enable(ns);
  430. else
  431. nvmet_ns_disable(ns);
  432. return ret ? ret : count;
  433. }
  434. CONFIGFS_ATTR(nvmet_ns_, enable);
  435. static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
  436. {
  437. return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
  438. }
  439. static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
  440. const char *page, size_t count)
  441. {
  442. struct nvmet_ns *ns = to_nvmet_ns(item);
  443. bool val;
  444. if (strtobool(page, &val))
  445. return -EINVAL;
  446. mutex_lock(&ns->subsys->lock);
  447. if (ns->enabled) {
  448. pr_err("disable ns before setting buffered_io value.\n");
  449. mutex_unlock(&ns->subsys->lock);
  450. return -EINVAL;
  451. }
  452. ns->buffered_io = val;
  453. mutex_unlock(&ns->subsys->lock);
  454. return count;
  455. }
  456. CONFIGFS_ATTR(nvmet_ns_, buffered_io);
  457. static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
  458. const char *page, size_t count)
  459. {
  460. struct nvmet_ns *ns = to_nvmet_ns(item);
  461. bool val;
  462. if (strtobool(page, &val))
  463. return -EINVAL;
  464. if (!val)
  465. return -EINVAL;
  466. mutex_lock(&ns->subsys->lock);
  467. if (!ns->enabled) {
  468. pr_err("enable ns before revalidate.\n");
  469. mutex_unlock(&ns->subsys->lock);
  470. return -EINVAL;
  471. }
  472. if (nvmet_ns_revalidate(ns))
  473. nvmet_ns_changed(ns->subsys, ns->nsid);
  474. mutex_unlock(&ns->subsys->lock);
  475. return count;
  476. }
  477. CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
/* Attributes exposed under each namespace directory. */
static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_ana_grpid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
	&nvmet_ns_attr_revalidate_size,
#ifdef CONFIG_PCI_P2PDMA
	&nvmet_ns_attr_p2pmem,
#endif
	NULL,
};
/* configfs release: final put of the namespace directory frees the ns. */
static void nvmet_ns_release(struct config_item *item)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	nvmet_ns_free(ns);
}

static struct configfs_item_operations nvmet_ns_item_ops = {
	.release		= nvmet_ns_release,
};

static const struct config_item_type nvmet_ns_type = {
	.ct_item_ops		= &nvmet_ns_item_ops,
	.ct_attrs		= nvmet_ns_attrs,
	.ct_owner		= THIS_MODULE,
};
/*
 * mkdir under "namespaces": the directory name is parsed as the NSID and
 * a new namespace object is allocated for it.
 */
static struct config_group *nvmet_ns_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
	struct nvmet_ns *ns;
	int ret;
	u32 nsid;

	ret = kstrtou32(name, 0, &nsid);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* NSID 0 is invalid and NVME_NSID_ALL is the broadcast value. */
	if (nsid == 0 || nsid == NVME_NSID_ALL) {
		pr_err("invalid nsid %#x", nsid);
		goto out;
	}

	ret = -ENOMEM;
	ns = nvmet_ns_alloc(subsys, nsid);
	if (!ns)
		goto out;
	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);

	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);

	return &ns->group;
out:
	return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_namespaces_group_ops = {
	.make_group		= nvmet_ns_make,
};

static const struct config_item_type nvmet_namespaces_type = {
	.ct_group_ops		= &nvmet_namespaces_group_ops,
	.ct_owner		= THIS_MODULE,
};
#ifdef CONFIG_NVME_TARGET_PASSTHRU
static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);

	return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
}

/*
 * Set the path of the NVMe controller char device to pass commands
 * through to; rejected while a passthru controller is already enabled.
 */
static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);

	ret = -EBUSY;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");	/* strip the trailing newline */
	if (!len)
		goto out_unlock;

	kfree(subsys->passthru_ctrl_path);
	ret = -ENOMEM;
	subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
CONFIGFS_ATTR(nvmet_passthru_, device_path);
  569. static ssize_t nvmet_passthru_enable_show(struct config_item *item,
  570. char *page)
  571. {
  572. struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
  573. return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
  574. }
  575. static ssize_t nvmet_passthru_enable_store(struct config_item *item,
  576. const char *page, size_t count)
  577. {
  578. struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
  579. bool enable;
  580. int ret = 0;
  581. if (strtobool(page, &enable))
  582. return -EINVAL;
  583. if (enable)
  584. ret = nvmet_passthru_ctrl_enable(subsys);
  585. else
  586. nvmet_passthru_ctrl_disable(subsys);
  587. return ret ? ret : count;
  588. }
  589. CONFIGFS_ATTR(nvmet_passthru_, enable);
  590. static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
  591. char *page)
  592. {
  593. return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
  594. }
  595. static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
  596. const char *page, size_t count)
  597. {
  598. struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
  599. unsigned int timeout;
  600. if (kstrtouint(page, 0, &timeout))
  601. return -EINVAL;
  602. subsys->admin_timeout = timeout;
  603. return count;
  604. }
  605. CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
  606. static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
  607. char *page)
  608. {
  609. return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
  610. }
  611. static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
  612. const char *page, size_t count)
  613. {
  614. struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
  615. unsigned int timeout;
  616. if (kstrtouint(page, 0, &timeout))
  617. return -EINVAL;
  618. subsys->io_timeout = timeout;
  619. return count;
  620. }
  621. CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
  622. static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
  623. char *page)
  624. {
  625. return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
  626. }
  627. static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
  628. const char *page, size_t count)
  629. {
  630. struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
  631. unsigned int clear_ids;
  632. if (kstrtouint(page, 0, &clear_ids))
  633. return -EINVAL;
  634. subsys->clear_ids = clear_ids;
  635. return count;
  636. }
  637. CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
/* Attributes exposed under each subsystem's "passthru" directory. */
static struct configfs_attribute *nvmet_passthru_attrs[] = {
	&nvmet_passthru_attr_device_path,
	&nvmet_passthru_attr_enable,
	&nvmet_passthru_attr_admin_timeout,
	&nvmet_passthru_attr_io_timeout,
	&nvmet_passthru_attr_clear_ids,
	NULL,
};

static const struct config_item_type nvmet_passthru_type = {
	.ct_attrs		= nvmet_passthru_attrs,
	.ct_owner		= THIS_MODULE,
};

/* Register the "passthru" default group under a subsystem. */
static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
	config_group_init_type_name(&subsys->passthru_group,
				    "passthru", &nvmet_passthru_type);
	configfs_add_default_group(&subsys->passthru_group,
				   &subsys->group);
}

#else /* CONFIG_NVME_TARGET_PASSTHRU */

/* No-op when passthru support is compiled out. */
static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
}

#endif /* CONFIG_NVME_TARGET_PASSTHRU */
/*
 * symlink a subsystem into a port's "subsystems" directory.  The first
 * linked subsystem enables the port; duplicates return -EEXIST.
 */
static int nvmet_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys;
	struct nvmet_subsys_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_subsys_type) {
		pr_err("can only link subsystems into the subsystems dir.!\n");
		return -EINVAL;
	}
	subsys = to_subsys(target);
	/* allocate before taking nvmet_config_sem to keep the hold short */
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	down_write(&nvmet_config_sem);
	ret = -EEXIST;
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto out_free_link;
	}

	/* first subsystem on this port: bring the port up */
	if (list_empty(&port->subsystems)) {
		ret = nvmet_enable_port(port);
		if (ret)
			goto out_free_link;
	}

	list_add_tail(&link->entry, &port->subsystems);
	nvmet_port_disc_changed(port, subsys);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
/*
 * Remove a subsystem symlink from a port: tear down its controllers,
 * signal a discovery change, and disable the port when it was the last
 * linked subsystem.
 */
static void nvmet_port_subsys_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys = to_subsys(target);
	struct nvmet_subsys_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto found;
	}
	/* not linked: nothing to do */
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_port_del_ctrls(port, subsys);
	nvmet_port_disc_changed(port, subsys);

	if (list_empty(&port->subsystems))
		nvmet_disable_port(port);
	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_port_subsys_item_ops = {
	.allow_link		= nvmet_port_subsys_allow_link,
	.drop_link		= nvmet_port_subsys_drop_link,
};

static const struct config_item_type nvmet_port_subsys_type = {
	.ct_item_ops		= &nvmet_port_subsys_item_ops,
	.ct_owner		= THIS_MODULE,
};
/*
 * symlink a host NQN into a subsystem's "allowed_hosts" directory.
 * Rejected while allow_any_host is set; duplicates return -EEXIST.
 */
static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host;
	struct nvmet_host_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_host_type) {
		pr_err("can only link hosts into the allowed_hosts directory!\n");
		return -EINVAL;
	}

	host = to_host(target);
	/* allocate before taking nvmet_config_sem to keep the hold short */
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->host = host;

	down_write(&nvmet_config_sem);
	ret = -EINVAL;
	if (subsys->allow_any_host) {
		pr_err("can't add hosts when allow_any_host is set!\n");
		goto out_free_link;
	}

	ret = -EEXIST;
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto out_free_link;
	}
	list_add_tail(&link->entry, &subsys->hosts);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	return 0;
out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
  764. static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
  765. struct config_item *target)
  766. {
  767. struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
  768. struct nvmet_host *host = to_host(target);
  769. struct nvmet_host_link *p;
  770. down_write(&nvmet_config_sem);
  771. list_for_each_entry(p, &subsys->hosts, entry) {
  772. if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
  773. goto found;
  774. }
  775. up_write(&nvmet_config_sem);
  776. return;
  777. found:
  778. list_del(&p->entry);
  779. nvmet_subsys_disc_changed(subsys, host);
  780. up_write(&nvmet_config_sem);
  781. kfree(p);
  782. }
/* Symlink management for a subsystem's "allowed_hosts" configfs directory. */
static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
	.allow_link	= nvmet_allowed_hosts_allow_link,
	.drop_link	= nvmet_allowed_hosts_drop_link,
};

static const struct config_item_type nvmet_allowed_hosts_type = {
	.ct_item_ops	= &nvmet_allowed_hosts_item_ops,
	.ct_owner	= THIS_MODULE,
};
  791. static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
  792. char *page)
  793. {
  794. return snprintf(page, PAGE_SIZE, "%d\n",
  795. to_subsys(item)->allow_any_host);
  796. }
  797. static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
  798. const char *page, size_t count)
  799. {
  800. struct nvmet_subsys *subsys = to_subsys(item);
  801. bool allow_any_host;
  802. int ret = 0;
  803. if (strtobool(page, &allow_any_host))
  804. return -EINVAL;
  805. down_write(&nvmet_config_sem);
  806. if (allow_any_host && !list_empty(&subsys->hosts)) {
  807. pr_err("Can't set allow_any_host when explicit hosts are set!\n");
  808. ret = -EINVAL;
  809. goto out_unlock;
  810. }
  811. if (subsys->allow_any_host != allow_any_host) {
  812. subsys->allow_any_host = allow_any_host;
  813. nvmet_subsys_disc_changed(subsys, NULL);
  814. }
  815. out_unlock:
  816. up_write(&nvmet_config_sem);
  817. return ret ? ret : count;
  818. }
  819. CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
  820. static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
  821. char *page)
  822. {
  823. struct nvmet_subsys *subsys = to_subsys(item);
  824. if (NVME_TERTIARY(subsys->ver))
  825. return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
  826. NVME_MAJOR(subsys->ver),
  827. NVME_MINOR(subsys->ver),
  828. NVME_TERTIARY(subsys->ver));
  829. return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
  830. NVME_MAJOR(subsys->ver),
  831. NVME_MINOR(subsys->ver));
  832. }
  833. static ssize_t
  834. nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
  835. const char *page, size_t count)
  836. {
  837. int major, minor, tertiary = 0;
  838. int ret;
  839. if (subsys->subsys_discovered) {
  840. if (NVME_TERTIARY(subsys->ver))
  841. pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
  842. NVME_MAJOR(subsys->ver),
  843. NVME_MINOR(subsys->ver),
  844. NVME_TERTIARY(subsys->ver));
  845. else
  846. pr_err("Can't set version number. %llu.%llu is already assigned\n",
  847. NVME_MAJOR(subsys->ver),
  848. NVME_MINOR(subsys->ver));
  849. return -EINVAL;
  850. }
  851. /* passthru subsystems use the underlying controller's version */
  852. if (nvmet_is_passthru_subsys(subsys))
  853. return -EINVAL;
  854. ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
  855. if (ret != 2 && ret != 3)
  856. return -EINVAL;
  857. subsys->ver = NVME_VS(major, minor, tertiary);
  858. return count;
  859. }
/*
 * Locking wrapper for the version store: take nvmet_config_sem first, then
 * subsys->lock (the lock order used by the other subsys attribute stores),
 * and delegate the actual parsing/validation to the _locked helper.
 */
static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);
  873. /* See Section 1.5 of NVMe 1.4 */
  874. static bool nvmet_is_ascii(const char c)
  875. {
  876. return c >= 0x20 && c <= 0x7e;
  877. }
  878. static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
  879. char *page)
  880. {
  881. struct nvmet_subsys *subsys = to_subsys(item);
  882. return snprintf(page, PAGE_SIZE, "%.*s\n",
  883. NVMET_SN_MAX_SIZE, subsys->serial);
  884. }
  885. static ssize_t
  886. nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
  887. const char *page, size_t count)
  888. {
  889. int pos, len = strcspn(page, "\n");
  890. if (subsys->subsys_discovered) {
  891. pr_err("Can't set serial number. %s is already assigned\n",
  892. subsys->serial);
  893. return -EINVAL;
  894. }
  895. if (!len || len > NVMET_SN_MAX_SIZE) {
  896. pr_err("Serial Number can not be empty or exceed %d Bytes\n",
  897. NVMET_SN_MAX_SIZE);
  898. return -EINVAL;
  899. }
  900. for (pos = 0; pos < len; pos++) {
  901. if (!nvmet_is_ascii(page[pos])) {
  902. pr_err("Serial Number must contain only ASCII strings\n");
  903. return -EINVAL;
  904. }
  905. }
  906. memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
  907. return count;
  908. }
/*
 * Locking wrapper for the serial store: nvmet_config_sem then subsys->lock,
 * matching the order used by the other subsys attribute stores.
 */
static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
  922. static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
  923. char *page)
  924. {
  925. return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
  926. }
  927. static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
  928. const char *page, size_t cnt)
  929. {
  930. u16 cntlid_min;
  931. if (sscanf(page, "%hu\n", &cntlid_min) != 1)
  932. return -EINVAL;
  933. if (cntlid_min == 0)
  934. return -EINVAL;
  935. down_write(&nvmet_config_sem);
  936. if (cntlid_min >= to_subsys(item)->cntlid_max)
  937. goto out_unlock;
  938. to_subsys(item)->cntlid_min = cntlid_min;
  939. up_write(&nvmet_config_sem);
  940. return cnt;
  941. out_unlock:
  942. up_write(&nvmet_config_sem);
  943. return -EINVAL;
  944. }
  945. CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
  946. static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
  947. char *page)
  948. {
  949. return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
  950. }
  951. static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
  952. const char *page, size_t cnt)
  953. {
  954. u16 cntlid_max;
  955. if (sscanf(page, "%hu\n", &cntlid_max) != 1)
  956. return -EINVAL;
  957. if (cntlid_max == 0)
  958. return -EINVAL;
  959. down_write(&nvmet_config_sem);
  960. if (cntlid_max <= to_subsys(item)->cntlid_min)
  961. goto out_unlock;
  962. to_subsys(item)->cntlid_max = cntlid_max;
  963. up_write(&nvmet_config_sem);
  964. return cnt;
  965. out_unlock:
  966. up_write(&nvmet_config_sem);
  967. return -EINVAL;
  968. }
  969. CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
  970. static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
  971. char *page)
  972. {
  973. struct nvmet_subsys *subsys = to_subsys(item);
  974. return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
  975. }
  976. static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
  977. const char *page, size_t count)
  978. {
  979. int pos = 0, len;
  980. char *val;
  981. if (subsys->subsys_discovered) {
  982. pr_err("Can't set model number. %s is already assigned\n",
  983. subsys->model_number);
  984. return -EINVAL;
  985. }
  986. len = strcspn(page, "\n");
  987. if (!len)
  988. return -EINVAL;
  989. if (len > NVMET_MN_MAX_SIZE) {
  990. pr_err("Model number size can not exceed %d Bytes\n",
  991. NVMET_MN_MAX_SIZE);
  992. return -EINVAL;
  993. }
  994. for (pos = 0; pos < len; pos++) {
  995. if (!nvmet_is_ascii(page[pos]))
  996. return -EINVAL;
  997. }
  998. val = kmemdup_nul(page, len, GFP_KERNEL);
  999. if (!val)
  1000. return -ENOMEM;
  1001. kfree(subsys->model_number);
  1002. subsys->model_number = val;
  1003. return count;
  1004. }
/*
 * Locking wrapper for the model store: nvmet_config_sem then subsys->lock,
 * matching the order used by the other subsys attribute stores.
 */
static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
#ifdef CONFIG_BLK_DEV_INTEGRITY
/* Show whether protection-information (PI) support is enabled. */
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
}

/*
 * Enable/disable protection-information support.
 *
 * NOTE(review): unlike the other subsys attribute stores in this file, this
 * one writes subsys->pi_support without taking nvmet_config_sem — confirm
 * whether that is intentional.
 */
static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool pi_enable;

	if (strtobool(page, &pi_enable))
		return -EINVAL;

	subsys->pi_support = pi_enable;
	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
#endif
  1036. static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
  1037. char *page)
  1038. {
  1039. return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
  1040. }
  1041. static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
  1042. const char *page, size_t cnt)
  1043. {
  1044. u16 qid_max;
  1045. if (sscanf(page, "%hu\n", &qid_max) != 1)
  1046. return -EINVAL;
  1047. if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
  1048. return -EINVAL;
  1049. down_write(&nvmet_config_sem);
  1050. to_subsys(item)->max_qid = qid_max;
  1051. up_write(&nvmet_config_sem);
  1052. return cnt;
  1053. }
  1054. CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
/* Attribute files exposed in each subsystem's configfs directory. */
static struct configfs_attribute *nvmet_subsys_attrs[] = {
	&nvmet_subsys_attr_attr_allow_any_host,
	&nvmet_subsys_attr_attr_version,
	&nvmet_subsys_attr_attr_serial,
	&nvmet_subsys_attr_attr_cntlid_min,
	&nvmet_subsys_attr_attr_cntlid_max,
	&nvmet_subsys_attr_attr_model,
	&nvmet_subsys_attr_attr_qid_max,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_subsys_attr_attr_pi_enable,
#endif
	NULL,
};
  1068. /*
  1069. * Subsystem structures & folder operation functions below
  1070. */
/*
 * configfs release for a subsystem directory: tear down any remaining
 * controllers, then drop the configfs reference on the subsystem.
 */
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}

static struct configfs_item_operations nvmet_subsys_item_ops = {
	.release	= nvmet_subsys_release,
};

static const struct config_item_type nvmet_subsys_type = {
	.ct_item_ops	= &nvmet_subsys_item_ops,
	.ct_attrs	= nvmet_subsys_attrs,
	.ct_owner	= THIS_MODULE,
};
/*
 * mkdir under /sys/kernel/config/nvmet/subsystems: allocate a subsystem
 * named by its NQN and populate its default "namespaces", "allowed_hosts"
 * (and, via the helper, passthru) child groups.  The discovery subsystem
 * NQN is reserved and cannot be created here.
 */
static struct config_group *nvmet_subsys_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys;

	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
		pr_err("can't create discovery subsystem through configfs\n");
		return ERR_PTR(-EINVAL);
	}

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (IS_ERR(subsys))
		return ERR_CAST(subsys);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

	config_group_init_type_name(&subsys->namespaces_group,
			"namespaces", &nvmet_namespaces_type);
	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

	config_group_init_type_name(&subsys->allowed_hosts_group,
			"allowed_hosts", &nvmet_allowed_hosts_type);
	configfs_add_default_group(&subsys->allowed_hosts_group,
			&subsys->group);

	nvmet_add_passthru_group(subsys);

	return &subsys->group;
}
/* mkdir support for the top-level "subsystems" directory. */
static struct configfs_group_operations nvmet_subsystems_group_ops = {
	.make_group		= nvmet_subsys_make,
};

static const struct config_item_type nvmet_subsystems_type = {
	.ct_group_ops	= &nvmet_subsystems_group_ops,
	.ct_owner	= THIS_MODULE,
};
  1114. static ssize_t nvmet_referral_enable_show(struct config_item *item,
  1115. char *page)
  1116. {
  1117. return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
  1118. }
  1119. static ssize_t nvmet_referral_enable_store(struct config_item *item,
  1120. const char *page, size_t count)
  1121. {
  1122. struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
  1123. struct nvmet_port *port = to_nvmet_port(item);
  1124. bool enable;
  1125. if (strtobool(page, &enable))
  1126. goto inval;
  1127. if (enable)
  1128. nvmet_referral_enable(parent, port);
  1129. else
  1130. nvmet_referral_disable(parent, port);
  1131. return count;
  1132. inval:
  1133. pr_err("Invalid value '%s' for enable\n", page);
  1134. return -EINVAL;
  1135. }
  1136. CONFIGFS_ATTR(nvmet_referral_, enable);
  1137. /*
  1138. * Discovery Service subsystem definitions
  1139. */
/* Attribute files exposed in each referral's configfs directory. */
static struct configfs_attribute *nvmet_referral_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_portid,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_referral_attr_enable,
	NULL,
};
/*
 * rmdir notification for a referral: disable it on the parent port before
 * the configfs item goes away.
 */
static void nvmet_referral_notify(struct config_group *group,
		struct config_item *item)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);

	nvmet_referral_disable(parent, port);
}

/* Final release of the referral's nvmet_port once all references drop. */
static void nvmet_referral_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	kfree(port);
}

static struct configfs_item_operations nvmet_referral_item_ops = {
	.release	= nvmet_referral_release,
};

static const struct config_item_type nvmet_referral_type = {
	.ct_owner	= THIS_MODULE,
	.ct_attrs	= nvmet_referral_attrs,
	.ct_item_ops	= &nvmet_referral_item_ops,
};
  1170. static struct config_group *nvmet_referral_make(
  1171. struct config_group *group, const char *name)
  1172. {
  1173. struct nvmet_port *port;
  1174. port = kzalloc(sizeof(*port), GFP_KERNEL);
  1175. if (!port)
  1176. return ERR_PTR(-ENOMEM);
  1177. INIT_LIST_HEAD(&port->entry);
  1178. config_group_init_type_name(&port->group, name, &nvmet_referral_type);
  1179. return &port->group;
  1180. }
/* mkdir/rmdir support for a port's "referrals" directory. */
static struct configfs_group_operations nvmet_referral_group_ops = {
	.make_group		= nvmet_referral_make,
	.disconnect_notify	= nvmet_referral_notify,
};

static const struct config_item_type nvmet_referrals_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &nvmet_referral_group_ops,
};
/* Mapping between ANA state values and their configfs string names. */
static struct nvmet_type_name_map nvmet_ana_state[] = {
	{ NVME_ANA_OPTIMIZED,		"optimized" },
	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
	{ NVME_ANA_CHANGE,		"change" },
};
  1196. static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
  1197. char *page)
  1198. {
  1199. struct nvmet_ana_group *grp = to_ana_group(item);
  1200. enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
  1201. int i;
  1202. for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
  1203. if (state == nvmet_ana_state[i].type)
  1204. return sprintf(page, "%s\n", nvmet_ana_state[i].name);
  1205. }
  1206. return sprintf(page, "\n");
  1207. }
  1208. static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
  1209. const char *page, size_t count)
  1210. {
  1211. struct nvmet_ana_group *grp = to_ana_group(item);
  1212. enum nvme_ana_state *ana_state = grp->port->ana_state;
  1213. int i;
  1214. for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
  1215. if (sysfs_streq(page, nvmet_ana_state[i].name))
  1216. goto found;
  1217. }
  1218. pr_err("Invalid value '%s' for ana_state\n", page);
  1219. return -EINVAL;
  1220. found:
  1221. down_write(&nvmet_ana_sem);
  1222. ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
  1223. nvmet_ana_chgcnt++;
  1224. up_write(&nvmet_ana_sem);
  1225. nvmet_port_send_ana_event(grp->port);
  1226. return count;
  1227. }
  1228. CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
/* Attribute files exposed in each ANA group's configfs directory. */
static struct configfs_attribute *nvmet_ana_group_attrs[] = {
	&nvmet_ana_group_attr_ana_state,
	NULL,
};

/*
 * configfs release for an ANA group: mark it inaccessible, drop its enabled
 * count, and notify hosts.  The port's default group is embedded in the port
 * and must not be freed here.
 */
static void nvmet_ana_group_release(struct config_item *item)
{
	struct nvmet_ana_group *grp = to_ana_group(item);

	if (grp == &grp->port->ana_default_group)
		return;

	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
	nvmet_ana_group_enabled[grp->grpid]--;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);
	kfree(grp);
}

static struct configfs_item_operations nvmet_ana_group_item_ops = {
	.release		= nvmet_ana_group_release,
};

static const struct config_item_type nvmet_ana_group_type = {
	.ct_item_ops		= &nvmet_ana_group_item_ops,
	.ct_attrs		= nvmet_ana_group_attrs,
	.ct_owner		= THIS_MODULE,
};
  1253. static struct config_group *nvmet_ana_groups_make_group(
  1254. struct config_group *group, const char *name)
  1255. {
  1256. struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
  1257. struct nvmet_ana_group *grp;
  1258. u32 grpid;
  1259. int ret;
  1260. ret = kstrtou32(name, 0, &grpid);
  1261. if (ret)
  1262. goto out;
  1263. ret = -EINVAL;
  1264. if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
  1265. goto out;
  1266. ret = -ENOMEM;
  1267. grp = kzalloc(sizeof(*grp), GFP_KERNEL);
  1268. if (!grp)
  1269. goto out;
  1270. grp->port = port;
  1271. grp->grpid = grpid;
  1272. down_write(&nvmet_ana_sem);
  1273. nvmet_ana_group_enabled[grpid]++;
  1274. up_write(&nvmet_ana_sem);
  1275. nvmet_port_send_ana_event(grp->port);
  1276. config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
  1277. return &grp->group;
  1278. out:
  1279. return ERR_PTR(ret);
  1280. }
/* mkdir support for a port's "ana_groups" directory. */
static struct configfs_group_operations nvmet_ana_groups_group_ops = {
	.make_group		= nvmet_ana_groups_make_group,
};

static const struct config_item_type nvmet_ana_groups_type = {
	.ct_group_ops		= &nvmet_ana_groups_group_ops,
	.ct_owner		= THIS_MODULE,
};
  1288. /*
  1289. * Ports definitions.
  1290. */
/*
 * configfs release for a port directory: wait for in-flight controller
 * teardown on nvmet_wq, unhook the port from the global list, and free it.
 */
static void nvmet_port_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	/* Let inflight controllers teardown complete */
	flush_workqueue(nvmet_wq);
	list_del(&port->global_entry);

	kfree(port->ana_state);
	kfree(port);
}
/*
 * Attribute files exposed in each port's configfs directory.  No addr_portid
 * here: the port ID is taken from the directory name at creation time.
 */
static struct configfs_attribute *nvmet_port_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_attr_param_inline_data_size,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_attr_param_pi_enable,
#endif
	NULL,
};

static struct configfs_item_operations nvmet_port_item_ops = {
	.release		= nvmet_port_release,
};

static const struct config_item_type nvmet_port_type = {
	.ct_attrs		= nvmet_port_attrs,
	.ct_item_ops		= &nvmet_port_item_ops,
	.ct_owner		= THIS_MODULE,
};
  1320. static struct config_group *nvmet_ports_make(struct config_group *group,
  1321. const char *name)
  1322. {
  1323. struct nvmet_port *port;
  1324. u16 portid;
  1325. u32 i;
  1326. if (kstrtou16(name, 0, &portid))
  1327. return ERR_PTR(-EINVAL);
  1328. port = kzalloc(sizeof(*port), GFP_KERNEL);
  1329. if (!port)
  1330. return ERR_PTR(-ENOMEM);
  1331. port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
  1332. sizeof(*port->ana_state), GFP_KERNEL);
  1333. if (!port->ana_state) {
  1334. kfree(port);
  1335. return ERR_PTR(-ENOMEM);
  1336. }
  1337. for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
  1338. if (i == NVMET_DEFAULT_ANA_GRPID)
  1339. port->ana_state[1] = NVME_ANA_OPTIMIZED;
  1340. else
  1341. port->ana_state[i] = NVME_ANA_INACCESSIBLE;
  1342. }
  1343. list_add(&port->global_entry, &nvmet_ports_list);
  1344. INIT_LIST_HEAD(&port->entry);
  1345. INIT_LIST_HEAD(&port->subsystems);
  1346. INIT_LIST_HEAD(&port->referrals);
  1347. port->inline_data_size = -1; /* < 0 == let the transport choose */
  1348. port->disc_addr.portid = cpu_to_le16(portid);
  1349. port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
  1350. port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
  1351. config_group_init_type_name(&port->group, name, &nvmet_port_type);
  1352. config_group_init_type_name(&port->subsys_group,
  1353. "subsystems", &nvmet_port_subsys_type);
  1354. configfs_add_default_group(&port->subsys_group, &port->group);
  1355. config_group_init_type_name(&port->referrals_group,
  1356. "referrals", &nvmet_referrals_type);
  1357. configfs_add_default_group(&port->referrals_group, &port->group);
  1358. config_group_init_type_name(&port->ana_groups_group,
  1359. "ana_groups", &nvmet_ana_groups_type);
  1360. configfs_add_default_group(&port->ana_groups_group, &port->group);
  1361. port->ana_default_group.port = port;
  1362. port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
  1363. config_group_init_type_name(&port->ana_default_group.group,
  1364. __stringify(NVMET_DEFAULT_ANA_GRPID),
  1365. &nvmet_ana_group_type);
  1366. configfs_add_default_group(&port->ana_default_group.group,
  1367. &port->ana_groups_group);
  1368. return &port->group;
  1369. }
/* mkdir support for the top-level "ports" directory. */
static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group		= nvmet_ports_make,
};

static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops		= &nvmet_ports_group_ops,
	.ct_owner		= THIS_MODULE,
};

/* Top-level default groups, registered in nvmet_init_configfs(). */
static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
  1379. #ifdef CONFIG_NVME_TARGET_AUTH
  1380. static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
  1381. char *page)
  1382. {
  1383. u8 *dhchap_secret = to_host(item)->dhchap_secret;
  1384. if (!dhchap_secret)
  1385. return sprintf(page, "\n");
  1386. return sprintf(page, "%s\n", dhchap_secret);
  1387. }
  1388. static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
  1389. const char *page, size_t count)
  1390. {
  1391. struct nvmet_host *host = to_host(item);
  1392. int ret;
  1393. ret = nvmet_auth_set_key(host, page, false);
  1394. /*
  1395. * Re-authentication is a soft state, so keep the
  1396. * current authentication valid until the host
  1397. * requests re-authentication.
  1398. */
  1399. return ret < 0 ? ret : count;
  1400. }
  1401. CONFIGFS_ATTR(nvmet_host_, dhchap_key);
  1402. static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
  1403. char *page)
  1404. {
  1405. u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
  1406. if (!dhchap_secret)
  1407. return sprintf(page, "\n");
  1408. return sprintf(page, "%s\n", dhchap_secret);
  1409. }
  1410. static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
  1411. const char *page, size_t count)
  1412. {
  1413. struct nvmet_host *host = to_host(item);
  1414. int ret;
  1415. ret = nvmet_auth_set_key(host, page, true);
  1416. /*
  1417. * Re-authentication is a soft state, so keep the
  1418. * current authentication valid until the host
  1419. * requests re-authentication.
  1420. */
  1421. return ret < 0 ? ret : count;
  1422. }
  1423. CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
/* Show the name of the host's DH-HMAC-CHAP hash, or "none" if unknown. */
static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
		char *page)
{
	struct nvmet_host *host = to_host(item);
	const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);

	return sprintf(page, "%s\n", hash_name ? hash_name : "none");
}

/*
 * Set the DH-HMAC-CHAP hash by name; the algorithm must be known and its
 * shash implementation available in the crypto API.
 *
 * NOTE(review): returns -ENOTSUPP, which checkpatch discourages in favor of
 * -EOPNOTSUPP; changing it would alter the errno seen by userspace, so it is
 * only flagged here.
 */
static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	u8 hmac_id;

	hmac_id = nvme_auth_hmac_id(page);
	if (hmac_id == NVME_AUTH_HASH_INVALID)
		return -EINVAL;
	if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
		return -ENOTSUPP;
	host->dhchap_hash_id = hmac_id;
	return count;
}
CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
  1445. static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
  1446. char *page)
  1447. {
  1448. struct nvmet_host *host = to_host(item);
  1449. const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
  1450. return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
  1451. }
  1452. static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
  1453. const char *page, size_t count)
  1454. {
  1455. struct nvmet_host *host = to_host(item);
  1456. int dhgroup_id;
  1457. dhgroup_id = nvme_auth_dhgroup_id(page);
  1458. if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
  1459. return -EINVAL;
  1460. if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
  1461. const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
  1462. if (!crypto_has_kpp(kpp, 0, 0))
  1463. return -EINVAL;
  1464. }
  1465. host->dhchap_dhgroup_id = dhgroup_id;
  1466. return count;
  1467. }
  1468. CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
/* Attribute files exposed in each host's configfs directory (auth only). */
static struct configfs_attribute *nvmet_host_attrs[] = {
	&nvmet_host_attr_dhchap_key,
	&nvmet_host_attr_dhchap_ctrl_key,
	&nvmet_host_attr_dhchap_hash,
	&nvmet_host_attr_dhchap_dhgroup,
	NULL,
};
#endif /* CONFIG_NVME_TARGET_AUTH */
/*
 * configfs release for a host directory: free any authentication secrets
 * along with the host itself.
 */
static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

#ifdef CONFIG_NVME_TARGET_AUTH
	kfree(host->dhchap_secret);
	kfree(host->dhchap_ctrl_secret);
#endif
	kfree(host);
}

static struct configfs_item_operations nvmet_host_item_ops = {
	.release		= nvmet_host_release,
};

static const struct config_item_type nvmet_host_type = {
	.ct_item_ops		= &nvmet_host_item_ops,
#ifdef CONFIG_NVME_TARGET_AUTH
	.ct_attrs		= nvmet_host_attrs,
#endif
	.ct_owner		= THIS_MODULE,
};
  1496. static struct config_group *nvmet_hosts_make_group(struct config_group *group,
  1497. const char *name)
  1498. {
  1499. struct nvmet_host *host;
  1500. host = kzalloc(sizeof(*host), GFP_KERNEL);
  1501. if (!host)
  1502. return ERR_PTR(-ENOMEM);
  1503. #ifdef CONFIG_NVME_TARGET_AUTH
  1504. /* Default to SHA256 */
  1505. host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
  1506. #endif
  1507. config_group_init_type_name(&host->group, name, &nvmet_host_type);
  1508. return &host->group;
  1509. }
/* mkdir support for the top-level "hosts" directory. */
static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group		= nvmet_hosts_make_group,
};

static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops		= &nvmet_hosts_group_ops,
	.ct_owner		= THIS_MODULE,
};

static struct config_group nvmet_hosts_group;
/* The /sys/kernel/config/nvmet root: no attributes, just child groups. */
static const struct config_item_type nvmet_root_type = {
	.ct_owner		= THIS_MODULE,
};

static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "nvmet",
			.ci_type	= &nvmet_root_type,
		},
	},
};
/*
 * Register the nvmet configfs hierarchy: the root plus its default
 * "subsystems", "ports" and "hosts" directories.  Default groups must be
 * added before configfs_register_subsystem() makes the tree visible.
 */
int __init nvmet_init_configfs(void)
{
	int ret;

	config_group_init(&nvmet_configfs_subsystem.su_group);
	mutex_init(&nvmet_configfs_subsystem.su_mutex);

	config_group_init_type_name(&nvmet_subsystems_group,
			"subsystems", &nvmet_subsystems_type);
	configfs_add_default_group(&nvmet_subsystems_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_ports_group,
			"ports", &nvmet_ports_type);
	configfs_add_default_group(&nvmet_ports_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_hosts_group,
			"hosts", &nvmet_hosts_type);
	configfs_add_default_group(&nvmet_hosts_group,
			&nvmet_configfs_subsystem.su_group);

	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
	if (ret) {
		pr_err("configfs_register_subsystem: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Tear down the nvmet configfs hierarchy on module exit. */
void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}