dimm_devs.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve bus and dimm handle and return if this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
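
/*
 * Read @len bytes of the DIMM's label/configuration area, starting at
 * @offset, into @buf.  The transfer is split into ND_CMD_GET_CONFIG_DATA
 * calls no larger than the max_xfer size reported by the bus provider.
 */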
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
		size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
	     len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}
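
/*
 * Write @len bytes from @buf into the DIMM's label/configuration area at
 * @offset, issuing ND_CMD_SET_CONFIG_DATA in max_xfer-sized chunks.  The
 * firmware status word occupies the last 4 bytes of each command buffer.
 */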
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);
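
/*
 * Final kref_put() callback for the DIMM driver data: release all DPA
 * allocations tracked under ndd->dpa, free the cached label area, and
 * drop the device reference held on behalf of the drvdata.
 */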
void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);
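
/*
 * Report the number of free label slots in the DIMM's label area.  One
 * slot is held back from the reported count; the underflow check guards
 * against the (unexpected) case of zero free slots.
 */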
static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
	struct device *dev;
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	dev = ndd->dev;
	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	ssize_t rc;

	device_lock(dev);
	rc = __available_slots_show(dev_get_drvdata(dev), buf);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(available_slots);

__weak ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};
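
/*
 * Hide the security attributes when no security state is reported.  The
 * "security" attribute is writable only if the provider implements at
 * least one state-mutating operation; "frozen" requires a freeze op.
 */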
static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_result result;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	result = nvdimm->fw_ops->activate_result(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (result) {
	case NVDIMM_FWA_RESULT_NONE:
		return sprintf(buf, "none\n");
	case NVDIMM_FWA_RESULT_SUCCESS:
		return sprintf(buf, "success\n");
	case NVDIMM_FWA_RESULT_FAIL:
		return sprintf(buf, "fail\n");
	case NVDIMM_FWA_RESULT_NOTSTAGED:
		return sprintf(buf, "not_staged\n");
	case NVDIMM_FWA_RESULT_NEEDRESET:
		return sprintf(buf, "need_reset\n");
	default:
		return -ENXIO;
	}
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_state state;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	state = nvdimm->fw_ops->activate_state(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	default:
		return -ENXIO;
	}
}

static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_trigger arg;
	int rc;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "arm"))
		arg = NVDIMM_FWA_ARM;
	else if (sysfs_streq(buf, "disarm"))
		arg = NVDIMM_FWA_DISARM;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	rc = nvdimm->fw_ops->arm(nvdimm, arg);
	nvdimm_bus_unlock(dev);

	if (rc < 0)
		return rc;
	return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);

static struct attribute *nvdimm_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_result.attr,
	NULL,
};
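
/*
 * Expose the "firmware" attribute group only when both the bus and the
 * DIMM implement firmware-activate ops and the bus reports at least
 * quiesce-level activation capability.
 */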
static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return 0;

	if (!nvdimm->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_firmware_attributes,
	.is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	&nvdimm_firmware_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

static struct lock_class_key nvdimm_key;
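
/*
 * Allocate a new nvdimm, reserve an id from dimm_ida, record the
 * provider's command mask, flush resources, and security/firmware ops,
 * initialize the security state, and register the result as an "nmem%d"
 * device under @nvdimm_bus.
 */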
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->fw_ops = fw_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility.
	 */
	/* get security state and extended (master) state */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &nvdimm_key);
	if (test_bit(NDD_REGISTER_SYNC, &flags))
		nd_device_register_sync(dev);
	else
		nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);
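
/*
 * Tear down a DIMM device: mark its security state frozen, cancel any
 * pending overwrite work (dropping the reference that work held), and
 * unregister the device synchronously.
 */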
void nvdimm_delete(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;
	bool dev_put = false;

	/* We are shutting down. Make state frozen artificially. */
	nvdimm_bus_lock(dev);
	set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
	if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
		dev_put = true;
	nvdimm_bus_unlock(dev);
	cancel_delayed_work_sync(&nvdimm->dwork);
	if (dev_put)
		put_device(dev);
	nd_device_unregister(dev, ND_SYNC);
}
EXPORT_SYMBOL_GPL(nvdimm_delete);

static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}
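
/*
 * Cache the sysfs dirent for the "security" attribute so that overwrite
 * completion (handled elsewhere) can signal the attribute; a devm action
 * releases the reference when the device is detached.
 */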
int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);
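
/*
 * Freeze the DIMM's security state via the provider's freeze op.  Callers
 * must hold the bus lock; the request is rejected while an overwrite
 * operation is in flight.
 */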
int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}
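
/*
 * Per-DIMM allocation granularity for this region: the region alignment
 * divided evenly across its mappings.  Returns 0 (after warning) if the
 * bus lock is not held or the region configuration is inconsistent.
 */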
static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
				"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
				% nd_region->ndr_mappings,
				"invalid region align %#lx mappings: %d\n",
				nd_region->align, nd_region->ndr_mappings))
		return 0;
	return nd_region->align / nd_region->ndr_mappings;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (end < start)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_region: constrain available space check to this reference region
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t map_start, map_end, busy = 0;
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (end > map_end) {
				nd_dbg_dpa(nd_region, ndd, res,
						"misaligned to iset\n");
				return 0;
			}
			busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
		}
	}

	if (busy < nd_mapping->size)
		return ALIGN_DOWN(nd_mapping->size - busy, align);
	return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form pmem-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}