ndtest.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  3. #include <linux/platform_device.h>
  4. #include <linux/device.h>
  5. #include <linux/module.h>
  6. #include <linux/genalloc.h>
  7. #include <linux/vmalloc.h>
  8. #include <linux/dma-mapping.h>
  9. #include <linux/list_sort.h>
  10. #include <linux/libnvdimm.h>
  11. #include <linux/ndctl.h>
  12. #include <nd-core.h>
  13. #include <linux/printk.h>
  14. #include <linux/seq_buf.h>
  15. #include "../watermark.h"
  16. #include "nfit_test.h"
  17. #include "ndtest.h"
/* Fixed geometry of the simulated nvdimm topology. */
enum {
	DIMM_SIZE = SZ_32M,	/* capacity of each simulated DIMM */
	LABEL_SIZE = SZ_128K,	/* per-DIMM label/config area size */
	NUM_INSTANCES = 2,	/* number of simulated nvdimm buses */
	NUM_DCR = 4,		/* slots in the probe-time per-bus DMA tables */
	NDTEST_MAX_MAPPING = 6,	/* upper bound on mappings per region */
};
/* Envelope of DSM commands the simulated DIMMs accept via ndtest_ctl(). */
#define NDTEST_SCM_DIMM_CMD_MASK \
	((1ul << ND_CMD_GET_CONFIG_SIZE) | \
	(1ul << ND_CMD_GET_CONFIG_DATA) | \
	(1ul << ND_CMD_SET_CONFIG_DATA) | \
	(1ul << ND_CMD_CALL))
  30. #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
  31. (((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
  32. | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
/* Serializes access to every instance's resource list. */
static DEFINE_SPINLOCK(ndtest_lock);
/* One simulated nvdimm bus per slot, populated in ndtest_init(). */
static struct ndtest_priv *instances[NUM_INSTANCES];
/* sysfs class hosting the test_dimm%d companion devices. */
static struct class *ndtest_dimm_class;
/* Backing pool handing out fake physical addresses for DIMM-sized ranges. */
static struct gen_pool *ndtest_pool;
/* Bus 0: five 32M DIMMs, each advertising two label formats. */
static struct ndtest_dimm dimm_group1[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
		.uuid_str = "1e5c75d2-b618-11ea-9aa3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
		.uuid_str = "1c4d43ac-b618-11ea-be80-507b9ddc0f72",
		.physical_id = 1,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
		.uuid_str = "a9f17ffc-b618-11ea-b36d-507b9ddc0f72",
		.physical_id = 2,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
		.uuid_str = "b6b83b22-b618-11ea-8aae-507b9ddc0f72",
		.physical_id = 3,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
		.uuid_str = "bf9baaee-b618-11ea-b181-507b9ddc0f72",
		.physical_id = 4,
		.num_formats = 2,
	},
};

/* Bus 1: a single DIMM reporting every PAPR health problem at once. */
static struct ndtest_dimm dimm_group2[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
		.uuid_str = "ca0817e2-b618-11ea-9db3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 1,
		.flags = PAPR_PMEM_UNARMED | PAPR_PMEM_EMPTY |
			PAPR_PMEM_SAVE_FAILED | PAPR_PMEM_SHUTDOWN_DIRTY |
			PAPR_PMEM_HEALTH_FATAL,
	},
};
/* Region 0: 2-way interleave across the first 16M of DIMMs 0 and 1. */
static struct ndtest_mapping region0_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = 0,
		.size = SZ_16M,
	}
};

/* Region 1: 4-way interleave across the second 16M of DIMMs 0-3. */
static struct ndtest_mapping region1_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 2,
		.position = 2,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 3,
		.position = 3,
		.start = SZ_16M,
		.size = SZ_16M,
	},
};
/* PMEM regions exported by bus 0. */
static struct ndtest_region bus0_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region0_mapping),
		.mapping = region0_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region1_mapping),
		.mapping = region1_mapping,
		.size = DIMM_SIZE * 2,
		.range_index = 2,
	},
};

/* Single mapping covering the whole lone DIMM on bus 1. */
static struct ndtest_mapping region6_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = DIMM_SIZE,
	},
};

/* IO-namespace region exported by bus 1. */
static struct ndtest_region bus1_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_IO,
		.num_mappings = ARRAY_SIZE(region6_mapping),
		.mapping = region6_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
};
/* Static per-bus description: which DIMMs and regions each instance gets. */
static struct ndtest_config bus_configs[NUM_INSTANCES] = {
	/* bus 1 */
	{
		.dimm_start = 0,
		.dimm_count = ARRAY_SIZE(dimm_group1),
		.dimms = dimm_group1,
		.regions = bus0_regions,
		.num_regions = ARRAY_SIZE(bus0_regions),
	},
	/* bus 2 */
	{
		/* dimm ids continue where bus 1's left off */
		.dimm_start = ARRAY_SIZE(dimm_group1),
		.dimm_count = ARRAY_SIZE(dimm_group2),
		.dimms = dimm_group2,
		.regions = bus1_regions,
		.num_regions = ARRAY_SIZE(bus1_regions),
	},
};
  177. static inline struct ndtest_priv *to_ndtest_priv(struct device *dev)
  178. {
  179. struct platform_device *pdev = to_platform_device(dev);
  180. return container_of(pdev, struct ndtest_priv, pdev);
  181. }
  182. static int ndtest_config_get(struct ndtest_dimm *p, unsigned int buf_len,
  183. struct nd_cmd_get_config_data_hdr *hdr)
  184. {
  185. unsigned int len;
  186. if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
  187. return -EINVAL;
  188. hdr->status = 0;
  189. len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
  190. memcpy(hdr->out_buf, p->label_area + hdr->in_offset, len);
  191. return buf_len - len;
  192. }
  193. static int ndtest_config_set(struct ndtest_dimm *p, unsigned int buf_len,
  194. struct nd_cmd_set_config_hdr *hdr)
  195. {
  196. unsigned int len;
  197. if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
  198. return -EINVAL;
  199. len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
  200. memcpy(p->label_area + hdr->in_offset, hdr->in_buf, len);
  201. return buf_len - len;
  202. }
  203. static int ndtest_get_config_size(struct ndtest_dimm *dimm, unsigned int buf_len,
  204. struct nd_cmd_get_config_size *size)
  205. {
  206. size->status = 0;
  207. size->max_xfer = 8;
  208. size->config_size = dimm->config_size;
  209. return 0;
  210. }
  211. static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
  212. struct nvdimm *nvdimm, unsigned int cmd, void *buf,
  213. unsigned int buf_len, int *cmd_rc)
  214. {
  215. struct ndtest_dimm *dimm;
  216. int _cmd_rc;
  217. if (!cmd_rc)
  218. cmd_rc = &_cmd_rc;
  219. *cmd_rc = 0;
  220. if (!nvdimm)
  221. return -EINVAL;
  222. dimm = nvdimm_provider_data(nvdimm);
  223. if (!dimm)
  224. return -EINVAL;
  225. switch (cmd) {
  226. case ND_CMD_GET_CONFIG_SIZE:
  227. *cmd_rc = ndtest_get_config_size(dimm, buf_len, buf);
  228. break;
  229. case ND_CMD_GET_CONFIG_DATA:
  230. *cmd_rc = ndtest_config_get(dimm, buf_len, buf);
  231. break;
  232. case ND_CMD_SET_CONFIG_DATA:
  233. *cmd_rc = ndtest_config_set(dimm, buf_len, buf);
  234. break;
  235. default:
  236. return -EINVAL;
  237. }
  238. /* Failures for a DIMM can be injected using fail_cmd and
  239. * fail_cmd_code, see the device attributes below
  240. */
  241. if ((1 << cmd) & dimm->fail_cmd)
  242. return dimm->fail_cmd_code ? dimm->fail_cmd_code : -EIO;
  243. return 0;
  244. }
  245. static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
  246. {
  247. int i;
  248. for (i = 0; i < NUM_INSTANCES; i++) {
  249. struct nfit_test_resource *n, *nfit_res = NULL;
  250. struct ndtest_priv *t = instances[i];
  251. if (!t)
  252. continue;
  253. spin_lock(&ndtest_lock);
  254. list_for_each_entry(n, &t->resources, list) {
  255. if (addr >= n->res.start && (addr < n->res.start
  256. + resource_size(&n->res))) {
  257. nfit_res = n;
  258. break;
  259. } else if (addr >= (unsigned long) n->buf
  260. && (addr < (unsigned long) n->buf
  261. + resource_size(&n->res))) {
  262. nfit_res = n;
  263. break;
  264. }
  265. }
  266. spin_unlock(&ndtest_lock);
  267. if (nfit_res)
  268. return nfit_res;
  269. }
  270. pr_warn("Failed to get resource\n");
  271. return NULL;
  272. }
  273. static void ndtest_release_resource(void *data)
  274. {
  275. struct nfit_test_resource *res = data;
  276. spin_lock(&ndtest_lock);
  277. list_del(&res->list);
  278. spin_unlock(&ndtest_lock);
  279. if (resource_size(&res->res) >= DIMM_SIZE)
  280. gen_pool_free(ndtest_pool, res->res.start,
  281. resource_size(&res->res));
  282. vfree(res->buf);
  283. kfree(res);
  284. }
  285. static void *ndtest_alloc_resource(struct ndtest_priv *p, size_t size,
  286. dma_addr_t *dma)
  287. {
  288. dma_addr_t __dma;
  289. void *buf;
  290. struct nfit_test_resource *res;
  291. struct genpool_data_align data = {
  292. .align = SZ_128M,
  293. };
  294. res = kzalloc(sizeof(*res), GFP_KERNEL);
  295. if (!res)
  296. return NULL;
  297. buf = vmalloc(size);
  298. if (size >= DIMM_SIZE)
  299. __dma = gen_pool_alloc_algo(ndtest_pool, size,
  300. gen_pool_first_fit_align, &data);
  301. else
  302. __dma = (unsigned long) buf;
  303. if (!__dma)
  304. goto buf_err;
  305. INIT_LIST_HEAD(&res->list);
  306. res->dev = &p->pdev.dev;
  307. res->buf = buf;
  308. res->res.start = __dma;
  309. res->res.end = __dma + size - 1;
  310. res->res.name = "NFIT";
  311. spin_lock_init(&res->lock);
  312. INIT_LIST_HEAD(&res->requests);
  313. spin_lock(&ndtest_lock);
  314. list_add(&res->list, &p->resources);
  315. spin_unlock(&ndtest_lock);
  316. if (dma)
  317. *dma = __dma;
  318. if (!devm_add_action(&p->pdev.dev, ndtest_release_resource, res))
  319. return res->buf;
  320. buf_err:
  321. if (__dma && size >= DIMM_SIZE)
  322. gen_pool_free(ndtest_pool, __dma, size);
  323. if (buf)
  324. vfree(buf);
  325. kfree(res);
  326. return NULL;
  327. }
  328. static ssize_t range_index_show(struct device *dev,
  329. struct device_attribute *attr, char *buf)
  330. {
  331. struct nd_region *nd_region = to_nd_region(dev);
  332. struct ndtest_region *region = nd_region_provider_data(nd_region);
  333. return sprintf(buf, "%d\n", region->range_index);
  334. }
static DEVICE_ATTR_RO(range_index);

/* Region attributes, grouped under a "papr" sysfs directory. */
static struct attribute *ndtest_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static const struct attribute_group ndtest_region_attribute_group = {
	.name = "papr",
	.attrs = ndtest_region_attributes,
};

static const struct attribute_group *ndtest_region_attribute_groups[] = {
	&ndtest_region_attribute_group,
	NULL,
};
/*
 * Build and register one PMEM region: allocate backing space, derive
 * the interleave-set cookies from the first mapping's DIMM UUID, and
 * translate the static mapping table into nd_mapping_desc entries.
 * Returns 0 on success or a negative errno; allocated resources are
 * devm-managed and released on device teardown.
 */
static int ndtest_create_region(struct ndtest_priv *p,
				struct ndtest_region *region)
{
	struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
	struct nd_region_desc *ndr_desc, _ndr_desc;
	struct nd_interleave_set *nd_set;
	struct resource res;
	int i, ndimm = region->mapping[0].dimm;
	u64 uuid[2];

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&_ndr_desc, 0, sizeof(_ndr_desc));
	ndr_desc = &_ndr_desc;

	/* Fake physical range backing the whole region. */
	if (!ndtest_alloc_resource(p, region->size, &res.start))
		return -ENOMEM;

	res.end = res.start + region->size - 1;
	ndr_desc->mapping = mappings;
	ndr_desc->res = &res;
	ndr_desc->provider_data = region;
	ndr_desc->attr_groups = ndtest_region_attribute_groups;

	/* Interleave-set cookies are seeded from the first DIMM's UUID. */
	if (uuid_parse(p->config->dimms[ndimm].uuid_str, (uuid_t *)uuid)) {
		pr_err("failed to parse UUID\n");
		return -ENXIO;
	}

	nd_set = devm_kzalloc(&p->pdev.dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	nd_set->cookie1 = cpu_to_le64(uuid[0]);
	nd_set->cookie2 = cpu_to_le64(uuid[1]);
	nd_set->altcookie = nd_set->cookie1;
	ndr_desc->nd_set = nd_set;

	/* Copy the static mapping table into libnvdimm's descriptor form. */
	for (i = 0; i < region->num_mappings; i++) {
		ndimm = region->mapping[i].dimm;
		mappings[i].start = region->mapping[i].start;
		mappings[i].size = region->mapping[i].size;
		mappings[i].position = region->mapping[i].position;
		mappings[i].nvdimm = p->config->dimms[ndimm].nvdimm;
	}

	ndr_desc->num_mappings = region->num_mappings;
	region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);
	if (!region->region) {
		dev_err(&p->pdev.dev, "Error registering region %pR\n",
			ndr_desc->res);
		return -ENXIO;
	}

	return 0;
}
  395. static int ndtest_init_regions(struct ndtest_priv *p)
  396. {
  397. int i, ret = 0;
  398. for (i = 0; i < p->config->num_regions; i++) {
  399. ret = ndtest_create_region(p, &p->config->regions[i]);
  400. if (ret)
  401. return ret;
  402. }
  403. return 0;
  404. }
  405. static void put_dimms(void *data)
  406. {
  407. struct ndtest_priv *p = data;
  408. int i;
  409. for (i = 0; i < p->config->dimm_count; i++)
  410. if (p->config->dimms[i].dev) {
  411. device_unregister(p->config->dimms[i].dev);
  412. p->config->dimms[i].dev = NULL;
  413. }
  414. }
  415. static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
  416. char *buf)
  417. {
  418. struct ndtest_dimm *dimm = dev_get_drvdata(dev);
  419. return sprintf(buf, "%#x\n", dimm->handle);
  420. }
  421. static DEVICE_ATTR_RO(handle);
  422. static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
  423. char *buf)
  424. {
  425. struct ndtest_dimm *dimm = dev_get_drvdata(dev);
  426. return sprintf(buf, "%#x\n", dimm->fail_cmd);
  427. }
  428. static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
  429. const char *buf, size_t size)
  430. {
  431. struct ndtest_dimm *dimm = dev_get_drvdata(dev);
  432. unsigned long val;
  433. ssize_t rc;
  434. rc = kstrtol(buf, 0, &val);
  435. if (rc)
  436. return rc;
  437. dimm->fail_cmd = val;
  438. return size;
  439. }
  440. static DEVICE_ATTR_RW(fail_cmd);
  441. static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
  442. char *buf)
  443. {
  444. struct ndtest_dimm *dimm = dev_get_drvdata(dev);
  445. return sprintf(buf, "%d\n", dimm->fail_cmd_code);
  446. }
  447. static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
  448. const char *buf, size_t size)
  449. {
  450. struct ndtest_dimm *dimm = dev_get_drvdata(dev);
  451. unsigned long val;
  452. ssize_t rc;
  453. rc = kstrtol(buf, 0, &val);
  454. if (rc)
  455. return rc;
  456. dimm->fail_cmd_code = val;
  457. return size;
  458. }
  459. static DEVICE_ATTR_RW(fail_cmd_code);
/* Attributes for the companion test_dimm%d class devices. */
static struct attribute *dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_fail_cmd.attr,
	&dev_attr_fail_cmd_code.attr,
	NULL,
};

static struct attribute_group dimm_attribute_group = {
	.attrs = dimm_attributes,
};

static const struct attribute_group *dimm_attribute_groups[] = {
	&dimm_attribute_group,
	NULL,
};
  473. static ssize_t phys_id_show(struct device *dev,
  474. struct device_attribute *attr, char *buf)
  475. {
  476. struct nvdimm *nvdimm = to_nvdimm(dev);
  477. struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
  478. return sprintf(buf, "%#x\n", dimm->physical_id);
  479. }
  480. static DEVICE_ATTR_RO(phys_id);
  481. static ssize_t vendor_show(struct device *dev,
  482. struct device_attribute *attr, char *buf)
  483. {
  484. return sprintf(buf, "0x1234567\n");
  485. }
  486. static DEVICE_ATTR_RO(vendor);
  487. static ssize_t id_show(struct device *dev,
  488. struct device_attribute *attr, char *buf)
  489. {
  490. struct nvdimm *nvdimm = to_nvdimm(dev);
  491. struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
  492. return sprintf(buf, "%04x-%02x-%04x-%08x", 0xabcd,
  493. 0xa, 2016, ~(dimm->handle));
  494. }
  495. static DEVICE_ATTR_RO(id);
  496. static ssize_t nvdimm_handle_show(struct device *dev,
  497. struct device_attribute *attr, char *buf)
  498. {
  499. struct nvdimm *nvdimm = to_nvdimm(dev);
  500. struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
  501. return sprintf(buf, "%#x\n", dimm->handle);
  502. }
  503. static struct device_attribute dev_attr_nvdimm_show_handle = {
  504. .attr = { .name = "handle", .mode = 0444 },
  505. .show = nvdimm_handle_show,
  506. };
  507. static ssize_t subsystem_vendor_show(struct device *dev,
  508. struct device_attribute *attr, char *buf)
  509. {
  510. return sprintf(buf, "0x%04x\n", 0);
  511. }
  512. static DEVICE_ATTR_RO(subsystem_vendor);
  513. static ssize_t dirty_shutdown_show(struct device *dev,
  514. struct device_attribute *attr, char *buf)
  515. {
  516. return sprintf(buf, "%d\n", 42);
  517. }
  518. static DEVICE_ATTR_RO(dirty_shutdown);
  519. static ssize_t formats_show(struct device *dev,
  520. struct device_attribute *attr, char *buf)
  521. {
  522. struct nvdimm *nvdimm = to_nvdimm(dev);
  523. struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
  524. return sprintf(buf, "%d\n", dimm->num_formats);
  525. }
  526. static DEVICE_ATTR_RO(formats);
  527. static ssize_t format_show(struct device *dev,
  528. struct device_attribute *attr, char *buf)
  529. {
  530. struct nvdimm *nvdimm = to_nvdimm(dev);
  531. struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
  532. if (dimm->num_formats > 1)
  533. return sprintf(buf, "0x201\n");
  534. return sprintf(buf, "0x101\n");
  535. }
  536. static DEVICE_ATTR_RO(format);
  537. static ssize_t format1_show(struct device *dev, struct device_attribute *attr,
  538. char *buf)
  539. {
  540. return sprintf(buf, "0x301\n");
  541. }
  542. static DEVICE_ATTR_RO(format1);
  543. static umode_t ndtest_nvdimm_attr_visible(struct kobject *kobj,
  544. struct attribute *a, int n)
  545. {
  546. struct device *dev = container_of(kobj, struct device, kobj);
  547. struct nvdimm *nvdimm = to_nvdimm(dev);
  548. struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
  549. if (a == &dev_attr_format1.attr && dimm->num_formats <= 1)
  550. return 0;
  551. return a->mode;
  552. }
  553. static ssize_t flags_show(struct device *dev,
  554. struct device_attribute *attr, char *buf)
  555. {
  556. struct nvdimm *nvdimm = to_nvdimm(dev);
  557. struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
  558. struct seq_buf s;
  559. u64 flags;
  560. flags = dimm->flags;
  561. seq_buf_init(&s, buf, PAGE_SIZE);
  562. if (flags & PAPR_PMEM_UNARMED_MASK)
  563. seq_buf_printf(&s, "not_armed ");
  564. if (flags & PAPR_PMEM_BAD_SHUTDOWN_MASK)
  565. seq_buf_printf(&s, "flush_fail ");
  566. if (flags & PAPR_PMEM_BAD_RESTORE_MASK)
  567. seq_buf_printf(&s, "restore_fail ");
  568. if (flags & PAPR_PMEM_SAVE_MASK)
  569. seq_buf_printf(&s, "save_fail ");
  570. if (flags & PAPR_PMEM_SMART_EVENT_MASK)
  571. seq_buf_printf(&s, "smart_notify ");
  572. if (seq_buf_used(&s))
  573. seq_buf_printf(&s, "\n");
  574. return seq_buf_used(&s);
  575. }
  576. static DEVICE_ATTR_RO(flags);
/* nvdimm attributes, grouped under a "papr" sysfs directory. */
static struct attribute *ndtest_nvdimm_attributes[] = {
	&dev_attr_nvdimm_show_handle.attr,
	&dev_attr_vendor.attr,
	&dev_attr_id.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_dirty_shutdown.attr,
	&dev_attr_formats.attr,
	&dev_attr_format.attr,
	&dev_attr_format1.attr,
	&dev_attr_flags.attr,
	NULL,
};

static const struct attribute_group ndtest_nvdimm_attribute_group = {
	.name = "papr",
	.attrs = ndtest_nvdimm_attributes,
	/* hides format1 for single-format DIMMs */
	.is_visible = ndtest_nvdimm_attr_visible,
};

static const struct attribute_group *ndtest_nvdimm_attribute_groups[] = {
	&ndtest_nvdimm_attribute_group,
	NULL,
};
  599. static int ndtest_dimm_register(struct ndtest_priv *priv,
  600. struct ndtest_dimm *dimm, int id)
  601. {
  602. struct device *dev = &priv->pdev.dev;
  603. unsigned long dimm_flags = dimm->flags;
  604. if (dimm->num_formats > 1)
  605. set_bit(NDD_LABELING, &dimm_flags);
  606. if (dimm->flags & PAPR_PMEM_UNARMED_MASK)
  607. set_bit(NDD_UNARMED, &dimm_flags);
  608. dimm->nvdimm = nvdimm_create(priv->bus, dimm,
  609. ndtest_nvdimm_attribute_groups, dimm_flags,
  610. NDTEST_SCM_DIMM_CMD_MASK, 0, NULL);
  611. if (!dimm->nvdimm) {
  612. dev_err(dev, "Error creating DIMM object for %pOF\n", priv->dn);
  613. return -ENXIO;
  614. }
  615. dimm->dev = device_create_with_groups(ndtest_dimm_class,
  616. &priv->pdev.dev,
  617. 0, dimm, dimm_attribute_groups,
  618. "test_dimm%d", id);
  619. if (!dimm->dev) {
  620. pr_err("Could not create dimm device attributes\n");
  621. return -ENOMEM;
  622. }
  623. return 0;
  624. }
  625. static int ndtest_nvdimm_init(struct ndtest_priv *p)
  626. {
  627. struct ndtest_dimm *d;
  628. void *res;
  629. int i, id;
  630. for (i = 0; i < p->config->dimm_count; i++) {
  631. d = &p->config->dimms[i];
  632. d->id = id = p->config->dimm_start + i;
  633. res = ndtest_alloc_resource(p, LABEL_SIZE, NULL);
  634. if (!res)
  635. return -ENOMEM;
  636. d->label_area = res;
  637. sprintf(d->label_area, "label%d", id);
  638. d->config_size = LABEL_SIZE;
  639. if (!ndtest_alloc_resource(p, d->size,
  640. &p->dimm_dma[id]))
  641. return -ENOMEM;
  642. if (!ndtest_alloc_resource(p, LABEL_SIZE,
  643. &p->label_dma[id]))
  644. return -ENOMEM;
  645. if (!ndtest_alloc_resource(p, LABEL_SIZE,
  646. &p->dcr_dma[id]))
  647. return -ENOMEM;
  648. d->address = p->dimm_dma[id];
  649. ndtest_dimm_register(p, d, id);
  650. }
  651. return 0;
  652. }
  653. static ssize_t compatible_show(struct device *dev,
  654. struct device_attribute *attr, char *buf)
  655. {
  656. return sprintf(buf, "nvdimm_test");
  657. }
  658. static DEVICE_ATTR_RO(compatible);
  659. static struct attribute *of_node_attributes[] = {
  660. &dev_attr_compatible.attr,
  661. NULL
  662. };
  663. static const struct attribute_group of_node_attribute_group = {
  664. .name = "of_node",
  665. .attrs = of_node_attributes,
  666. };
  667. static const struct attribute_group *ndtest_attribute_groups[] = {
  668. &of_node_attribute_group,
  669. NULL,
  670. };
  671. static int ndtest_bus_register(struct ndtest_priv *p)
  672. {
  673. p->config = &bus_configs[p->pdev.id];
  674. p->bus_desc.ndctl = ndtest_ctl;
  675. p->bus_desc.module = THIS_MODULE;
  676. p->bus_desc.provider_name = NULL;
  677. p->bus_desc.attr_groups = ndtest_attribute_groups;
  678. p->bus = nvdimm_bus_register(&p->pdev.dev, &p->bus_desc);
  679. if (!p->bus) {
  680. dev_err(&p->pdev.dev, "Error creating nvdimm bus %pOF\n", p->dn);
  681. return -ENOMEM;
  682. }
  683. return 0;
  684. }
  685. static int ndtest_remove(struct platform_device *pdev)
  686. {
  687. struct ndtest_priv *p = to_ndtest_priv(&pdev->dev);
  688. nvdimm_bus_unregister(p->bus);
  689. return 0;
  690. }
  691. static int ndtest_probe(struct platform_device *pdev)
  692. {
  693. struct ndtest_priv *p;
  694. int rc;
  695. p = to_ndtest_priv(&pdev->dev);
  696. if (ndtest_bus_register(p))
  697. return -ENOMEM;
  698. p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
  699. sizeof(dma_addr_t), GFP_KERNEL);
  700. p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
  701. sizeof(dma_addr_t), GFP_KERNEL);
  702. p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
  703. sizeof(dma_addr_t), GFP_KERNEL);
  704. rc = ndtest_nvdimm_init(p);
  705. if (rc)
  706. goto err;
  707. rc = ndtest_init_regions(p);
  708. if (rc)
  709. goto err;
  710. rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
  711. if (rc)
  712. goto err;
  713. platform_set_drvdata(pdev, p);
  714. return 0;
  715. err:
  716. pr_err("%s:%d Failed nvdimm init\n", __func__, __LINE__);
  717. return rc;
  718. }
/* Matches the self-registered platform devices created in ndtest_init(). */
static const struct platform_device_id ndtest_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver ndtest_driver = {
	.probe = ndtest_probe,
	.remove = ndtest_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = ndtest_id,
};
  731. static void ndtest_release(struct device *dev)
  732. {
  733. struct ndtest_priv *p = to_ndtest_priv(dev);
  734. kfree(p);
  735. }
  736. static void cleanup_devices(void)
  737. {
  738. int i;
  739. for (i = 0; i < NUM_INSTANCES; i++)
  740. if (instances[i])
  741. platform_device_unregister(&instances[i]->pdev);
  742. nfit_test_teardown();
  743. if (ndtest_pool)
  744. gen_pool_destroy(ndtest_pool);
  745. if (ndtest_dimm_class)
  746. class_destroy(ndtest_dimm_class);
  747. }
  748. static __init int ndtest_init(void)
  749. {
  750. int rc, i;
  751. pmem_test();
  752. libnvdimm_test();
  753. device_dax_test();
  754. dax_pmem_test();
  755. nfit_test_setup(ndtest_resource_lookup, NULL);
  756. ndtest_dimm_class = class_create(THIS_MODULE, "nfit_test_dimm");
  757. if (IS_ERR(ndtest_dimm_class)) {
  758. rc = PTR_ERR(ndtest_dimm_class);
  759. goto err_register;
  760. }
  761. ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
  762. if (!ndtest_pool) {
  763. rc = -ENOMEM;
  764. goto err_register;
  765. }
  766. if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
  767. rc = -ENOMEM;
  768. goto err_register;
  769. }
  770. /* Each instance can be taken as a bus, which can have multiple dimms */
  771. for (i = 0; i < NUM_INSTANCES; i++) {
  772. struct ndtest_priv *priv;
  773. struct platform_device *pdev;
  774. priv = kzalloc(sizeof(*priv), GFP_KERNEL);
  775. if (!priv) {
  776. rc = -ENOMEM;
  777. goto err_register;
  778. }
  779. INIT_LIST_HEAD(&priv->resources);
  780. pdev = &priv->pdev;
  781. pdev->name = KBUILD_MODNAME;
  782. pdev->id = i;
  783. pdev->dev.release = ndtest_release;
  784. rc = platform_device_register(pdev);
  785. if (rc) {
  786. put_device(&pdev->dev);
  787. goto err_register;
  788. }
  789. get_device(&pdev->dev);
  790. instances[i] = priv;
  791. }
  792. rc = platform_driver_register(&ndtest_driver);
  793. if (rc)
  794. goto err_register;
  795. return 0;
  796. err_register:
  797. pr_err("Error registering platform device\n");
  798. cleanup_devices();
  799. return rc;
  800. }
/* Module exit: tear down devices and hooks, then drop the driver. */
static __exit void ndtest_exit(void)
{
	cleanup_devices();
	platform_driver_unregister(&ndtest_driver);
}

module_init(ndtest_init);
module_exit(ndtest_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");