
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __ND_H__
#define __ND_H__
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/types.h>
#include <linux/nd.h>
#include "label.h"

enum {
	/*
	 * Limits the maximum number of block apertures a dimm can
	 * support and is an input to the geometry/on-disk-format of a
	 * BTT instance
	 */
	ND_MAX_LANES = 256,
	INT_LBASIZE_ALIGNMENT = 64,
	NVDIMM_IO_ATOMIC = 1,
};
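/*
 * Per-DIMM label area state, as used by the helpers below:
 * @dev: the nvdimm device this data belongs to
 * @nslabel_size: size of an individual namespace label
 * @nsarea: label/config area size information retrieved from the DIMM
 * @data: in-memory copy of the label area
 * @cxl: labels use the CXL layout rather than the EFI layout
 * @ns_current, @ns_next: indices of the current and next namespace index block
 * @dpa: resource tree of DIMM-physical-address allocations
 * @kref: lifetime of this structure (see put_ndd() / nvdimm_drvdata_release())
 */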
struct nvdimm_drvdata {
	struct device *dev;
	int nslabel_size;
	struct nd_cmd_get_config_size nsarea;
	void *data;
	bool cxl;
	int ns_current, ns_next;
	struct resource dpa;
	struct kref kref;
};
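/*
 * Namespace label field accessors: each nsl_*() helper dispatches on
 * ndd->cxl to read or write the CXL or EFI label layout, converting
 * between on-media little-endian fields and CPU byte order.
 */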
static inline const u8 *nsl_ref_name(struct nvdimm_drvdata *ndd,
				     struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return nd_label->cxl.name;
	return nd_label->efi.name;
}

static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd,
			       struct nd_namespace_label *nd_label, u8 *name)
{
	if (ndd->cxl)
		return memcpy(name, nd_label->cxl.name, NSLABEL_NAME_LEN);
	return memcpy(name, nd_label->efi.name, NSLABEL_NAME_LEN);
}

static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd,
			       struct nd_namespace_label *nd_label, u8 *name)
{
	if (!name)
		return NULL;
	if (ndd->cxl)
		return memcpy(nd_label->cxl.name, name, NSLABEL_NAME_LEN);
	return memcpy(nd_label->efi.name, name, NSLABEL_NAME_LEN);
}

static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd,
			       struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le32_to_cpu(nd_label->cxl.slot);
	return __le32_to_cpu(nd_label->efi.slot);
}

static inline void nsl_set_slot(struct nvdimm_drvdata *ndd,
				struct nd_namespace_label *nd_label, u32 slot)
{
	if (ndd->cxl)
		nd_label->cxl.slot = __cpu_to_le32(slot);
	else
		nd_label->efi.slot = __cpu_to_le32(slot);
}

static inline u64 nsl_get_checksum(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le64_to_cpu(nd_label->cxl.checksum);
	return __le64_to_cpu(nd_label->efi.checksum);
}

static inline void nsl_set_checksum(struct nvdimm_drvdata *ndd,
				    struct nd_namespace_label *nd_label,
				    u64 checksum)
{
	if (ndd->cxl)
		nd_label->cxl.checksum = __cpu_to_le64(checksum);
	else
		nd_label->efi.checksum = __cpu_to_le64(checksum);
}

static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd,
				struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le32_to_cpu(nd_label->cxl.flags);
	return __le32_to_cpu(nd_label->efi.flags);
}

static inline void nsl_set_flags(struct nvdimm_drvdata *ndd,
				 struct nd_namespace_label *nd_label, u32 flags)
{
	if (ndd->cxl)
		nd_label->cxl.flags = __cpu_to_le32(flags);
	else
		nd_label->efi.flags = __cpu_to_le32(flags);
}

static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd,
			      struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le64_to_cpu(nd_label->cxl.dpa);
	return __le64_to_cpu(nd_label->efi.dpa);
}

static inline void nsl_set_dpa(struct nvdimm_drvdata *ndd,
			       struct nd_namespace_label *nd_label, u64 dpa)
{
	if (ndd->cxl)
		nd_label->cxl.dpa = __cpu_to_le64(dpa);
	else
		nd_label->efi.dpa = __cpu_to_le64(dpa);
}

static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le64_to_cpu(nd_label->cxl.rawsize);
	return __le64_to_cpu(nd_label->efi.rawsize);
}

static inline void nsl_set_rawsize(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label,
				   u64 rawsize)
{
	if (ndd->cxl)
		nd_label->cxl.rawsize = __cpu_to_le64(rawsize);
	else
		nd_label->efi.rawsize = __cpu_to_le64(rawsize);
}
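/*
 * The helpers below cover fields that only exist in one of the two
 * label formats: isetcookie and nlabel are EFI-only (reads return 0 and
 * validation trivially succeeds for CXL), while nrange is CXL-only
 * (EFI labels implicitly describe a single range).
 */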
static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd,
				     struct nd_namespace_label *nd_label)
{
	/* WARN future refactor attempts that break this assumption */
	if (dev_WARN_ONCE(ndd->dev, ndd->cxl,
			  "CXL labels do not use the isetcookie concept\n"))
		return 0;
	return __le64_to_cpu(nd_label->efi.isetcookie);
}

static inline void nsl_set_isetcookie(struct nvdimm_drvdata *ndd,
				      struct nd_namespace_label *nd_label,
				      u64 isetcookie)
{
	if (!ndd->cxl)
		nd_label->efi.isetcookie = __cpu_to_le64(isetcookie);
}

static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd,
					   struct nd_namespace_label *nd_label,
					   u64 cookie)
{
	/*
	 * Let the EFI and CXL validation comingle, where fields that
	 * don't matter to CXL always validate.
	 */
	if (ndd->cxl)
		return true;
	return cookie == __le64_to_cpu(nd_label->efi.isetcookie);
}

static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le16_to_cpu(nd_label->cxl.position);
	return __le16_to_cpu(nd_label->efi.position);
}

static inline void nsl_set_position(struct nvdimm_drvdata *ndd,
				    struct nd_namespace_label *nd_label,
				    u16 position)
{
	if (ndd->cxl)
		nd_label->cxl.position = __cpu_to_le16(position);
	else
		nd_label->efi.position = __cpu_to_le16(position);
}

static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd,
				 struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return 0;
	return __le16_to_cpu(nd_label->efi.nlabel);
}

static inline void nsl_set_nlabel(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label,
				  u16 nlabel)
{
	if (!ndd->cxl)
		nd_label->efi.nlabel = __cpu_to_le16(nlabel);
}

static inline u16 nsl_get_nrange(struct nvdimm_drvdata *ndd,
				 struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le16_to_cpu(nd_label->cxl.nrange);
	return 1;
}

static inline void nsl_set_nrange(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label,
				  u16 nrange)
{
	if (ndd->cxl)
		nd_label->cxl.nrange = __cpu_to_le16(nrange);
}

static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label)
{
	/*
	 * Yes, for some reason the EFI labels convey a massive 64-bit
	 * lbasize, that got fixed for CXL.
	 */
	if (ndd->cxl)
		return __le16_to_cpu(nd_label->cxl.lbasize);
	return __le64_to_cpu(nd_label->efi.lbasize);
}

static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label,
				   u64 lbasize)
{
	if (ndd->cxl)
		nd_label->cxl.lbasize = __cpu_to_le16(lbasize);
	else
		nd_label->efi.lbasize = __cpu_to_le64(lbasize);
}

static inline const uuid_t *nsl_get_uuid(struct nvdimm_drvdata *ndd,
					 struct nd_namespace_label *nd_label,
					 uuid_t *uuid)
{
	if (ndd->cxl)
		import_uuid(uuid, nd_label->cxl.uuid);
	else
		import_uuid(uuid, nd_label->efi.uuid);
	return uuid;
}

static inline const uuid_t *nsl_set_uuid(struct nvdimm_drvdata *ndd,
					 struct nd_namespace_label *nd_label,
					 const uuid_t *uuid)
{
	if (ndd->cxl)
		export_uuid(nd_label->cxl.uuid, uuid);
	else
		export_uuid(nd_label->efi.uuid, uuid);
	return uuid;
}

static inline bool nsl_uuid_equal(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label,
				  const uuid_t *uuid)
{
	uuid_t tmp;

	if (ndd->cxl)
		import_uuid(&tmp, nd_label->cxl.uuid);
	else
		import_uuid(&tmp, nd_label->efi.uuid);
	return uuid_equal(&tmp, uuid);
}

static inline const u8 *nsl_uuid_raw(struct nvdimm_drvdata *ndd,
				     struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return nd_label->cxl.uuid;
	return nd_label->efi.uuid;
}

bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
			    struct nd_namespace_label *nd_label, guid_t *guid);
enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
					    struct nd_namespace_label *nd_label);

struct nd_region_data {
	int ns_count;
	int ns_active;
	unsigned int hints_shift;
	void __iomem *flush_wpq[];
};
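/*
 * flush_wpq[] is a flattened [dimm][hint] table of write-pending-queue
 * flush addresses with (1 << hints_shift) hint slots per DIMM.  For
 * example, with hints_shift == 2 the entry for (dimm, hint) lives at
 * index dimm * 4 + (hint & 3).
 */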
static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
		int dimm, int hint)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	return ndrd->flush_wpq[dimm * num + (hint & mask)];
}

static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
		int hint, void __iomem *flush)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
}
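/*
 * The label area (ndd->data) begins with a pair of namespace index
 * blocks; @i selects one of them, with ndd->ns_current and ndd->ns_next
 * naming the active copy and the copy to be written next.
 */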
static inline struct nd_namespace_index *to_namespace_index(
		struct nvdimm_drvdata *ndd, int i)
{
	if (i < 0)
		return NULL;

	return ndd->data + sizeof_namespace_index(ndd) * i;
}

static inline struct nd_namespace_index *to_current_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_current);
}

static inline struct nd_namespace_index *to_next_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_next);
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);
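/*
 * An EFI label field is only usable if it fits within the label size
 * advertised by the DIMM (older, smaller labels lack newer fields);
 * CXL labels never take this path.
 */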
#define efi_namespace_label_has(ndd, field) \
	(!ndd->cxl && offsetof(struct nvdimm_efi_label, field) \
		< sizeof_namespace_label(ndd))

#define nd_dbg_dpa(r, d, res, fmt, arg...) \
	dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
		(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
		(unsigned long long) (res ? resource_size(res) : 0), \
		(unsigned long long) (res ? res->start : 0), ##arg)
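/*
 * Walk the DPA allocations (children of ndd->dpa); the _safe variant
 * tolerates freeing the current resource while iterating.
 */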
#define for_each_dpa_resource(ndd, res) \
	for (res = (ndd)->dpa.child; res; res = res->sibling)

#define for_each_dpa_resource_safe(ndd, res, next) \
	for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
			res; res = next, next = next ? next->sibling : NULL)

struct nd_percpu_lane {
	int count;
	spinlock_t lock;
};

enum nd_label_flags {
	ND_LABEL_REAP,
};

struct nd_label_ent {
	struct list_head list;
	unsigned long flags;
	struct nd_namespace_label *label;
};

enum nd_mapping_lock_class {
	ND_MAPPING_CLASS0,
	ND_MAPPING_UUID_SCAN,
};

struct nd_mapping {
	struct nvdimm *nvdimm;
	u64 start;
	u64 size;
	int position;
	struct list_head labels;
	struct mutex lock;
	/*
	 * @ndd is for private use at region enable / disable time for
	 * get_ndd() + put_ndd(), all other nd_mapping to ndd
	 * conversions use to_ndd() which respects enabled state of the
	 * nvdimm.
	 */
	struct nvdimm_drvdata *ndd;
};
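/*
 * An nd_region aggregates ndr_mappings DIMM mappings into one
 * ndr_size-byte span starting at ndr_start, and owns the seed devices
 * and ida allocators for the namespace, btt, pfn, and dax children
 * instantiated on top of it.
 */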
struct nd_region {
	struct device dev;
	struct ida ns_ida;
	struct ida btt_ida;
	struct ida pfn_ida;
	struct ida dax_ida;
	unsigned long flags;
	struct device *ns_seed;
	struct device *btt_seed;
	struct device *pfn_seed;
	struct device *dax_seed;
	unsigned long align;
	u16 ndr_mappings;
	u64 ndr_size;
	u64 ndr_start;
	int id, num_lanes, ro, numa_node, target_node;
	void *provider_data;
	struct kernfs_node *bb_state;
	struct badblocks bb;
	struct nd_interleave_set *nd_set;
	struct nd_percpu_lane __percpu *lane;
	int (*flush)(struct nd_region *nd_region, struct bio *bio);
	struct nd_mapping mapping[];
};

static inline bool nsl_validate_nlabel(struct nd_region *nd_region,
				       struct nvdimm_drvdata *ndd,
				       struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return true;
	return nsl_get_nlabel(ndd, nd_label) == nd_region->ndr_mappings;
}

/*
 * Lookup next in the repeating sequence of 01, 10, and 11.
 */
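/* e.g. a valid sequence number cycles 1 -> 2 -> 3 -> 1 -> ... */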
static inline unsigned nd_inc_seq(unsigned seq)
{
	static const unsigned next[] = { 0, 2, 3, 1 };

	return next[seq & 3];
}

struct btt;
struct nd_btt {
	struct device dev;
	struct nd_namespace_common *ndns;
	struct btt *btt;
	unsigned long lbasize;
	u64 size;
	uuid_t *uuid;
	int id;
	int initial_offset;
	u16 version_major;
	u16 version_minor;
};

enum nd_pfn_mode {
	PFN_MODE_NONE,
	PFN_MODE_RAM,
	PFN_MODE_PMEM,
};

struct nd_pfn {
	int id;
	uuid_t *uuid;
	struct device dev;
	unsigned long align;
	unsigned long npfns;
	enum nd_pfn_mode mode;
	struct nd_pfn_sb *pfn_sb;
	struct nd_namespace_common *ndns;
};

struct nd_dax {
	struct nd_pfn nd_pfn;
};
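/* Space reserved at the start of a namespace for an info block: 8K, rounded up to PAGE_SIZE */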
static inline u32 nd_info_block_reserve(void)
{
	return ALIGN(SZ_8K, PAGE_SIZE);
}

enum nd_async_mode {
	ND_SYNC,
	ND_ASYNC,
};

int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
void wait_nvdimm_bus_probe_idle(struct device *dev);
void nd_device_register(struct device *dev);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
void nd_device_notify(struct device *dev, enum nvdimm_event event);
int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
		size_t len);
ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf);
ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported);
int __init nvdimm_init(void);
int __init nd_region_init(void);
int __init nd_label_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);
struct nvdimm;
extern const struct attribute_group nd_device_attribute_group;
extern const struct attribute_group nd_numa_attribute_group;
extern const struct attribute_group *nvdimm_bus_attribute_groups[];
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
			   size_t offset, size_t len);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
			   void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
		unsigned int len);
void nvdimm_set_labeling(struct device *dev);
void nvdimm_set_locked(struct device *dev);
void nvdimm_clear_locked(struct device *dev);
int nvdimm_security_setup_events(struct device *dev);

#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
int nvdimm_security_unlock(struct device *dev);
#else
static inline int nvdimm_security_unlock(struct device *dev)
{
	return 0;
}
#endif

struct nd_btt *to_nd_btt(struct device *dev);
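/*
 * Generic view of a 4K info block: the checksum occupies the final 8
 * bytes, and nd_sb_checksum() computes the checksum over the block.
 */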
struct nd_gen_sb {
	char reserved[SZ_4K - 8];
	__le64 checksum;
};

u64 nd_sb_checksum(struct nd_gen_sb *sb);
#if IS_ENABLED(CONFIG_BTT)
int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
static inline int nd_btt_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_btt(struct device *dev)
{
	return false;
}

static inline struct device *nd_btt_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

struct nd_pfn *to_nd_pfn(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_PFN)
#define MAX_NVDIMM_ALIGN	4

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
extern const struct attribute_group *nd_pfn_attribute_groups[];
#else
static inline int nd_pfn_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_pfn(struct device *dev)
{
	return false;
}

static inline struct device *nd_pfn_create(struct nd_region *nd_region)
{
	return NULL;
}

static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	return -ENODEV;
}
#endif

struct nd_dax *to_nd_dax(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_DAX)
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_dax(struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
#else
static inline int nd_dax_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_dax(struct device *dev)
{
	return false;
}

static inline struct device *nd_dax_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex);
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
bool is_nvdimm_bus_locked(struct device *dev);
void nvdimm_check_and_set_ro(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
void put_ndd(struct nvdimm_drvdata *ndd);
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n);
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
struct range;
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct range *range);
int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
		resource_size_t size);
void devm_namespace_disable(struct device *dev,
		struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
#define MAX_STRUCT_PAGE_SIZE		64
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
				   struct dev_pagemap *pgmap)
{
	return -ENXIO;
}
#endif
int nd_region_activate(struct nd_region *nd_region);
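/*
 * Report whether any sector in [sector, sector + len / 512) is on the
 * badblocks list (@len is in bytes, sectors are 512 bytes).
 */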
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
		unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}

const uuid_t *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
#endif /* __ND_H__ */