// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);
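
/*
 * Take, drop, or assert the per-bus reconfiguration lock for any
 * device in an nvdimm bus hierarchy; a device with no parent bus is
 * a no-op.
 */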
void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);
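
/*
 * A reference-counted mapping of a bus-scope address range that can
 * be shared by multiple regions; either a memremap()'d or an
 * ioremap()'d range depending on the mapping flags.
 */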
struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;

	return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
		dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
				&offset, size, dev_name(dev));
		goto err_request_region;
	}

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);

	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);
	return NULL;
}
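
/*
 * kref release callback; runs with the bus lock held by way of
 * nvdimm_map_put(), which protects the mapping_list removal.
 */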
static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	nvdimm_bus_lock(dev);
	nvdimm_map = find_nvdimm_map(dev, offset);
	if (!nvdimm_map)
		nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
	else
		kref_get(&nvdimm_map->kref);
	nvdimm_bus_unlock(dev);

	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
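
/*
 * 64-bit Fletcher checksum over @len bytes at @addr, consumed as u32
 * words; @le selects little-endian input. @len is expected to be a
 * multiple of sizeof(u32); any trailing bytes are ignored.
 */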
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of @buf
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
		size_t len)
{
	uuid_t uuid;
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = uuid_parse(buf, &uuid);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(&uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}
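
/*
 * Emit the supported size list for a sysfs attribute, bracketing the
 * currently selected size, e.g. "512 [4096] ".
 */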
ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_size == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_size = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);
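
/*
 * Taking and immediately dropping the device_lock() flushes any
 * in-flight probe/remove for the device before wait_probe returns.
 */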
static int flush_namespaces(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};
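
/*
 * The 'firmware/' sysfs group: report firmware-activation capability
 * and state, and arm or trigger activation through the bus
 * descriptor's fw_ops.
 */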
static ssize_t capability_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	cap = nd_desc->fw_ops->capability(nd_desc);

	switch (cap) {
	case NVDIMM_FWA_CAP_QUIESCE:
		return sprintf(buf, "quiesce\n");
	case NVDIMM_FWA_CAP_LIVE:
		return sprintf(buf, "live\n");
	default:
		return -EOPNOTSUPP;
	}
}
static DEVICE_ATTR_RO(capability);

static ssize_t activate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;
	enum nvdimm_fwa_state state;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	cap = nd_desc->fw_ops->capability(nd_desc);
	state = nd_desc->fw_ops->activate_state(nd_desc);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return -EOPNOTSUPP;

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	case NVDIMM_FWA_ARM_OVERFLOW:
		return sprintf(buf, "overflow\n");
	default:
		return -ENXIO;
	}
}
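
/*
 * Runs as the hibernate_quiet_exec() callback so that activation can
 * proceed while devices are quiesced.
 */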
static int exec_firmware_activate(void *data)
{
	struct nvdimm_bus_descriptor *nd_desc = data;

	return nd_desc->fw_ops->activate(nd_desc);
}

static ssize_t activate_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_state state;
	bool quiesce;
	ssize_t rc;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "live"))
		quiesce = false;
	else if (sysfs_streq(buf, "quiesce"))
		quiesce = true;
	else
		return -EINVAL;

	state = nd_desc->fw_ops->activate_state(nd_desc);

	switch (state) {
	case NVDIMM_FWA_BUSY:
		rc = -EBUSY;
		break;
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		if (quiesce)
			rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
		else
			rc = nd_desc->fw_ops->activate(nd_desc);
		break;
	case NVDIMM_FWA_IDLE:
	default:
		rc = -ENXIO;
	}

	if (rc == 0)
		rc = len;
	return rc;
}
static DEVICE_ATTR_ADMIN_RW(activate);

static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	/*
	 * Both 'activate' and 'capability' disappear when no ops
	 * detected, or a negative capability is indicated.
	 */
	if (!nd_desc->fw_ops)
		return 0;

	cap = nd_desc->fw_ops->capability(nd_desc);
	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static struct attribute *nvdimm_bus_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_capability.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_bus_firmware_attributes,
	.is_visible = nvdimm_bus_firmware_visible,
};

const struct attribute_group *nvdimm_bus_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&nvdimm_bus_firmware_attribute_group,
	NULL,
};

int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	return badrange_add(&nvdimm_bus->badrange, addr, length);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);
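
/*
 * Register per-sector metadata capacity with the block integrity
 * layer; a zero meta_size, or a !CONFIG_BLK_DEV_INTEGRITY build,
 * makes this a no-op.
 */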
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	struct blk_integrity bi;

	if (meta_size == 0)
		return 0;

	memset(&bi, 0, sizeof(bi));

	bi.tuple_size = meta_size;
	bi.tag_size = meta_size;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);

	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif

static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;

	rc = nvdimm_init();
	if (rc)
		goto err_dimm;

	rc = nd_region_init();
	if (rc)
		goto err_region;

	nd_label_init();

	return 0;
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);