/* drivers/misc/cxl/sysfs.c — sysfs attributes for the IBM CXL driver */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright 2014 IBM Corp.
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/device.h>
  7. #include <linux/sysfs.h>
  8. #include <linux/pci_regs.h>
  9. #include "cxl.h"
  10. #define to_afu_chardev_m(d) dev_get_drvdata(d)
  11. /********* Adapter attributes **********************************************/
  12. static ssize_t caia_version_show(struct device *device,
  13. struct device_attribute *attr,
  14. char *buf)
  15. {
  16. struct cxl *adapter = to_cxl_adapter(device);
  17. return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
  18. adapter->caia_minor);
  19. }
  20. static ssize_t psl_revision_show(struct device *device,
  21. struct device_attribute *attr,
  22. char *buf)
  23. {
  24. struct cxl *adapter = to_cxl_adapter(device);
  25. return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
  26. }
  27. static ssize_t base_image_show(struct device *device,
  28. struct device_attribute *attr,
  29. char *buf)
  30. {
  31. struct cxl *adapter = to_cxl_adapter(device);
  32. return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
  33. }
  34. static ssize_t image_loaded_show(struct device *device,
  35. struct device_attribute *attr,
  36. char *buf)
  37. {
  38. struct cxl *adapter = to_cxl_adapter(device);
  39. if (adapter->user_image_loaded)
  40. return scnprintf(buf, PAGE_SIZE, "user\n");
  41. return scnprintf(buf, PAGE_SIZE, "factory\n");
  42. }
/*
 * Report whether the PSL timebase is synchronized with the core timebase.
 *
 * In native (hypervisor) mode the cached status is recomputed on every
 * read by comparing the PSL timebase register with mftb(); in guest mode
 * the previously cached value is reported unchanged.
 *
 * NOTE(review): delta is u64, so if mftb() < psl_tb the subtraction wraps
 * and abs() cannot recover the sign — presumably the two timebases are
 * close enough that this does not matter in practice; confirm.
 */
static ssize_t psl_timebase_synced_show(struct device *device,
					struct device_attribute *attr,
					char *buf)
{
	struct cxl *adapter = to_cxl_adapter(device);
	u64 psl_tb, delta;

	/* Recompute the status only in native mode */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		psl_tb = adapter->native->sl_ops->timebase_read(adapter);
		delta = abs(mftb() - psl_tb);

		/* CORE TB and PSL TB difference <= 16usecs ? */
		adapter->psl_timebase_synced = (tb_to_ns(delta) < 16000) ? true : false;
		pr_devel("PSL timebase %s - delta: 0x%016llx\n",
			 (tb_to_ns(delta) < 16000) ? "synchronized" :
			 "not synchronized", tb_to_ns(delta));
	}
	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
}
  61. static ssize_t tunneled_ops_supported_show(struct device *device,
  62. struct device_attribute *attr,
  63. char *buf)
  64. {
  65. struct cxl *adapter = to_cxl_adapter(device);
  66. return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported);
  67. }
/*
 * "reset" store handler for the adapter.
 *
 * Writing 1 requests a normal reset: the adapter context lock must be
 * taken first, which only succeeds when no contexts are attached, and
 * once taken also prevents new contexts from activating.  Writing -1
 * forces a reset without taking the lock.  Any other input is -EINVAL.
 * Returns count on success, a negative errno otherwise.
 */
static ssize_t reset_adapter_store(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct cxl *adapter = to_cxl_adapter(device);
	int rc;
	int val;

	rc = sscanf(buf, "%i", &val);
	if ((rc != 1) || (val != 1 && val != -1))
		return -EINVAL;

	/*
	 * See if we can lock the context mapping that's only allowed
	 * when there are no contexts attached to the adapter. Once
	 * taken this will also prevent any context from getting activated.
	 */
	if (val == 1) {
		rc = cxl_adapter_context_lock(adapter);
		if (rc)
			goto out;

		rc = cxl_ops->adapter_reset(adapter);
		/* In case reset failed release context lock */
		if (rc)
			cxl_adapter_context_unlock(adapter);

	} else if (val == -1) {
		/* Perform a forced adapter reset */
		rc = cxl_ops->adapter_reset(adapter);
	}

out:
	return rc ? rc : count;
}
  98. static ssize_t load_image_on_perst_show(struct device *device,
  99. struct device_attribute *attr,
  100. char *buf)
  101. {
  102. struct cxl *adapter = to_cxl_adapter(device);
  103. if (!adapter->perst_loads_image)
  104. return scnprintf(buf, PAGE_SIZE, "none\n");
  105. if (adapter->perst_select_user)
  106. return scnprintf(buf, PAGE_SIZE, "user\n");
  107. return scnprintf(buf, PAGE_SIZE, "factory\n");
  108. }
  109. static ssize_t load_image_on_perst_store(struct device *device,
  110. struct device_attribute *attr,
  111. const char *buf, size_t count)
  112. {
  113. struct cxl *adapter = to_cxl_adapter(device);
  114. int rc;
  115. if (!strncmp(buf, "none", 4))
  116. adapter->perst_loads_image = false;
  117. else if (!strncmp(buf, "user", 4)) {
  118. adapter->perst_select_user = true;
  119. adapter->perst_loads_image = true;
  120. } else if (!strncmp(buf, "factory", 7)) {
  121. adapter->perst_select_user = false;
  122. adapter->perst_loads_image = true;
  123. } else
  124. return -EINVAL;
  125. if ((rc = cxl_update_image_control(adapter)))
  126. return rc;
  127. return count;
  128. }
  129. static ssize_t perst_reloads_same_image_show(struct device *device,
  130. struct device_attribute *attr,
  131. char *buf)
  132. {
  133. struct cxl *adapter = to_cxl_adapter(device);
  134. return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
  135. }
  136. static ssize_t perst_reloads_same_image_store(struct device *device,
  137. struct device_attribute *attr,
  138. const char *buf, size_t count)
  139. {
  140. struct cxl *adapter = to_cxl_adapter(device);
  141. int rc;
  142. int val;
  143. rc = sscanf(buf, "%i", &val);
  144. if ((rc != 1) || !(val == 1 || val == 0))
  145. return -EINVAL;
  146. adapter->perst_same_image = (val == 1);
  147. return count;
  148. }
/*
 * Adapter-level sysfs attributes, created under the adapter's device by
 * cxl_sysfs_adapter_add() subject to cxl_ops->support_attributes()
 * filtering.  "reset" is write-only, root-only.
 */
static struct device_attribute adapter_attrs[] = {
	__ATTR_RO(caia_version),
	__ATTR_RO(psl_revision),
	__ATTR_RO(base_image),
	__ATTR_RO(image_loaded),
	__ATTR_RO(psl_timebase_synced),
	__ATTR_RO(tunneled_ops_supported),
	__ATTR_RW(load_image_on_perst),
	__ATTR_RW(perst_reloads_same_image),
	__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
};
  160. /********* AFU master specific attributes **********************************/
  161. static ssize_t mmio_size_show_master(struct device *device,
  162. struct device_attribute *attr,
  163. char *buf)
  164. {
  165. struct cxl_afu *afu = to_afu_chardev_m(device);
  166. return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
  167. }
  168. static ssize_t pp_mmio_off_show(struct device *device,
  169. struct device_attribute *attr,
  170. char *buf)
  171. {
  172. struct cxl_afu *afu = to_afu_chardev_m(device);
  173. return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
  174. }
  175. static ssize_t pp_mmio_len_show(struct device *device,
  176. struct device_attribute *attr,
  177. char *buf)
  178. {
  179. struct cxl_afu *afu = to_afu_chardev_m(device);
  180. return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
  181. }
/*
 * Attributes exposed on the AFU master character device, created by
 * cxl_sysfs_afu_m_add().
 */
static struct device_attribute afu_master_attrs[] = {
	__ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
	__ATTR_RO(pp_mmio_off),
	__ATTR_RO(pp_mmio_len),
};
  187. /********* AFU attributes **************************************************/
  188. static ssize_t mmio_size_show(struct device *device,
  189. struct device_attribute *attr,
  190. char *buf)
  191. {
  192. struct cxl_afu *afu = to_cxl_afu(device);
  193. if (afu->pp_size)
  194. return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
  195. return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
  196. }
  197. static ssize_t reset_store_afu(struct device *device,
  198. struct device_attribute *attr,
  199. const char *buf, size_t count)
  200. {
  201. struct cxl_afu *afu = to_cxl_afu(device);
  202. int rc;
  203. /* Not safe to reset if it is currently in use */
  204. mutex_lock(&afu->contexts_lock);
  205. if (!idr_is_empty(&afu->contexts_idr)) {
  206. rc = -EBUSY;
  207. goto err;
  208. }
  209. if ((rc = cxl_ops->afu_reset(afu)))
  210. goto err;
  211. rc = count;
  212. err:
  213. mutex_unlock(&afu->contexts_lock);
  214. return rc;
  215. }
  216. static ssize_t irqs_min_show(struct device *device,
  217. struct device_attribute *attr,
  218. char *buf)
  219. {
  220. struct cxl_afu *afu = to_cxl_afu(device);
  221. return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
  222. }
  223. static ssize_t irqs_max_show(struct device *device,
  224. struct device_attribute *attr,
  225. char *buf)
  226. {
  227. struct cxl_afu *afu = to_cxl_afu(device);
  228. return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
  229. }
  230. static ssize_t irqs_max_store(struct device *device,
  231. struct device_attribute *attr,
  232. const char *buf, size_t count)
  233. {
  234. struct cxl_afu *afu = to_cxl_afu(device);
  235. ssize_t ret;
  236. int irqs_max;
  237. ret = sscanf(buf, "%i", &irqs_max);
  238. if (ret != 1)
  239. return -EINVAL;
  240. if (irqs_max < afu->pp_irqs)
  241. return -EINVAL;
  242. if (cpu_has_feature(CPU_FTR_HVMODE)) {
  243. if (irqs_max > afu->adapter->user_irqs)
  244. return -EINVAL;
  245. } else {
  246. /* pHyp sets a per-AFU limit */
  247. if (irqs_max > afu->guest->max_ints)
  248. return -EINVAL;
  249. }
  250. afu->irqs_max = irqs_max;
  251. return count;
  252. }
  253. static ssize_t modes_supported_show(struct device *device,
  254. struct device_attribute *attr, char *buf)
  255. {
  256. struct cxl_afu *afu = to_cxl_afu(device);
  257. char *p = buf, *end = buf + PAGE_SIZE;
  258. if (afu->modes_supported & CXL_MODE_DEDICATED)
  259. p += scnprintf(p, end - p, "dedicated_process\n");
  260. if (afu->modes_supported & CXL_MODE_DIRECTED)
  261. p += scnprintf(p, end - p, "afu_directed\n");
  262. return (p - buf);
  263. }
  264. static ssize_t prefault_mode_show(struct device *device,
  265. struct device_attribute *attr,
  266. char *buf)
  267. {
  268. struct cxl_afu *afu = to_cxl_afu(device);
  269. switch (afu->prefault_mode) {
  270. case CXL_PREFAULT_WED:
  271. return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
  272. case CXL_PREFAULT_ALL:
  273. return scnprintf(buf, PAGE_SIZE, "all\n");
  274. default:
  275. return scnprintf(buf, PAGE_SIZE, "none\n");
  276. }
  277. }
  278. static ssize_t prefault_mode_store(struct device *device,
  279. struct device_attribute *attr,
  280. const char *buf, size_t count)
  281. {
  282. struct cxl_afu *afu = to_cxl_afu(device);
  283. enum prefault_modes mode = -1;
  284. if (!strncmp(buf, "none", 4))
  285. mode = CXL_PREFAULT_NONE;
  286. else {
  287. if (!radix_enabled()) {
  288. /* only allowed when not in radix mode */
  289. if (!strncmp(buf, "work_element_descriptor", 23))
  290. mode = CXL_PREFAULT_WED;
  291. if (!strncmp(buf, "all", 3))
  292. mode = CXL_PREFAULT_ALL;
  293. } else {
  294. dev_err(device, "Cannot prefault with radix enabled\n");
  295. }
  296. }
  297. if (mode == -1)
  298. return -EINVAL;
  299. afu->prefault_mode = mode;
  300. return count;
  301. }
  302. static ssize_t mode_show(struct device *device,
  303. struct device_attribute *attr,
  304. char *buf)
  305. {
  306. struct cxl_afu *afu = to_cxl_afu(device);
  307. if (afu->current_mode == CXL_MODE_DEDICATED)
  308. return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
  309. if (afu->current_mode == CXL_MODE_DIRECTED)
  310. return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
  311. return scnprintf(buf, PAGE_SIZE, "none\n");
  312. }
/*
 * Switch the AFU between "dedicated_process", "afu_directed" and "none".
 *
 * Refused with -EBUSY while any context is attached.  The current mode
 * and num_procs are cleared while still holding contexts_lock — so no
 * new context can activate — and only then is the lock dropped before
 * calling afu_deactivate_mode()/afu_activate_mode(), which must run
 * outside the lock.  Returns count on success, negative errno otherwise.
 */
static ssize_t mode_store(struct device *device, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	int old_mode, mode = -1;
	int rc = -EBUSY;

	/* can't change this if we have a user */
	mutex_lock(&afu->contexts_lock);
	if (!idr_is_empty(&afu->contexts_idr))
		goto err;

	if (!strncmp(buf, "dedicated_process", 17))
		mode = CXL_MODE_DEDICATED;
	if (!strncmp(buf, "afu_directed", 12))
		mode = CXL_MODE_DIRECTED;
	if (!strncmp(buf, "none", 4))
		mode = 0;

	if (mode == -1) {
		rc = -EINVAL;
		goto err;
	}

	/*
	 * afu_deactivate_mode needs to be done outside the lock, prevent
	 * other contexts coming in before we are ready:
	 */
	old_mode = afu->current_mode;
	afu->current_mode = 0;
	afu->num_procs = 0;

	mutex_unlock(&afu->contexts_lock);

	if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
		return rc;
	if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
		return rc;

	return count;
err:
	mutex_unlock(&afu->contexts_lock);
	return rc;
}
  350. static ssize_t api_version_show(struct device *device,
  351. struct device_attribute *attr,
  352. char *buf)
  353. {
  354. return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
  355. }
  356. static ssize_t api_version_compatible_show(struct device *device,
  357. struct device_attribute *attr,
  358. char *buf)
  359. {
  360. return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
  361. }
  362. static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
  363. struct bin_attribute *bin_attr, char *buf,
  364. loff_t off, size_t count)
  365. {
  366. struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
  367. return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
  368. }
/*
 * Per-AFU sysfs attributes, created under the AFU device by
 * cxl_sysfs_afu_add() subject to cxl_ops->support_attributes()
 * filtering.  "reset" is write-only, root-only.
 */
static struct device_attribute afu_attrs[] = {
	__ATTR_RO(mmio_size),
	__ATTR_RO(irqs_min),
	__ATTR_RW(irqs_max),
	__ATTR_RO(modes_supported),
	__ATTR_RW(mode),
	__ATTR_RW(prefault_mode),
	__ATTR_RO(api_version),
	__ATTR_RO(api_version_compatible),
	__ATTR(reset, S_IWUSR, NULL, reset_store_afu),
};
  380. int cxl_sysfs_adapter_add(struct cxl *adapter)
  381. {
  382. struct device_attribute *dev_attr;
  383. int i, rc;
  384. for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
  385. dev_attr = &adapter_attrs[i];
  386. if (cxl_ops->support_attributes(dev_attr->attr.name,
  387. CXL_ADAPTER_ATTRS)) {
  388. if ((rc = device_create_file(&adapter->dev, dev_attr)))
  389. goto err;
  390. }
  391. }
  392. return 0;
  393. err:
  394. for (i--; i >= 0; i--) {
  395. dev_attr = &adapter_attrs[i];
  396. if (cxl_ops->support_attributes(dev_attr->attr.name,
  397. CXL_ADAPTER_ATTRS))
  398. device_remove_file(&adapter->dev, dev_attr);
  399. }
  400. return rc;
  401. }
  402. void cxl_sysfs_adapter_remove(struct cxl *adapter)
  403. {
  404. struct device_attribute *dev_attr;
  405. int i;
  406. for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
  407. dev_attr = &adapter_attrs[i];
  408. if (cxl_ops->support_attributes(dev_attr->attr.name,
  409. CXL_ADAPTER_ATTRS))
  410. device_remove_file(&adapter->dev, dev_attr);
  411. }
  412. }
/*
 * One AFU configuration record, exposed as a cr<n>/ kobject directory
 * under the AFU device with vendor/device/class attributes plus a raw
 * "config" binary file.  Freed from the kobject release callback.
 */
struct afu_config_record {
	struct kobject kobj;		/* embedded; release frees this struct */
	struct bin_attribute config_attr; /* the raw "config" binary file */
	struct list_head list;		/* linked on afu->crs */
	int cr;				/* record index, passed to afu_cr_read* */
	u16 device;			/* PCI device ID read from the record */
	u16 vendor;			/* PCI vendor ID read from the record */
	u32 class;			/* PCI class code (revision byte stripped) */
};

#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)
  423. static ssize_t vendor_show(struct kobject *kobj,
  424. struct kobj_attribute *attr, char *buf)
  425. {
  426. struct afu_config_record *cr = to_cr(kobj);
  427. return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
  428. }
  429. static ssize_t device_show(struct kobject *kobj,
  430. struct kobj_attribute *attr, char *buf)
  431. {
  432. struct afu_config_record *cr = to_cr(kobj);
  433. return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
  434. }
  435. static ssize_t class_show(struct kobject *kobj,
  436. struct kobj_attribute *attr, char *buf)
  437. {
  438. struct afu_config_record *cr = to_cr(kobj);
  439. return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
  440. }
/*
 * Binary read handler for a record's raw "config" file.
 *
 * The hardware is read in aligned 64-bit chunks (off & ~0x7); the inner
 * loop copies out the requested bytes of each chunk, starting at the
 * byte offset within the chunk (off & 0x7), little-endian byte order.
 * A failed read yields 0xff bytes for that chunk rather than an error.
 * The sysfs core clamps off/count to the attribute size before calling.
 */
static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct afu_config_record *cr = to_cr(kobj);
	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));

	u64 i, j, val, rc;

	for (i = 0; i < count;) {
		rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
		if (rc)
			val = ~0ULL;

		for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
			buf[i] = (val >> (j * 8)) & 0xff;
	}

	return count;
}
static struct kobj_attribute vendor_attribute =
	__ATTR_RO(vendor);
static struct kobj_attribute device_attribute =
	__ATTR_RO(device);
static struct kobj_attribute class_attribute =
	__ATTR_RO(class);

/* Default attributes present in every cr<n>/ directory. */
static struct attribute *afu_cr_attrs[] = {
	&vendor_attribute.attr,
	&device_attribute.attr,
	&class_attribute.attr,
	NULL,
};
ATTRIBUTE_GROUPS(afu_cr);
  470. static void release_afu_config_record(struct kobject *kobj)
  471. {
  472. struct afu_config_record *cr = to_cr(kobj);
  473. kfree(cr);
  474. }
/* kobj_type for cr<n>/ directories; release frees the record. */
static struct kobj_type afu_config_record_type = {
	.sysfs_ops = &kobj_sysfs_ops,
	.release = release_afu_config_record,
	.default_groups = afu_cr_groups,
};
/*
 * Allocate and publish one AFU configuration record as a cr<n>/ kobject
 * under the AFU device.  Reads the record's device/vendor/class IDs and
 * attaches the raw "config" binary attribute (root-readable only).
 *
 * Error handling: before kobject_init_and_add() the record is freed
 * with plain kfree() (label err); afterwards it must be released via
 * kobject_put() so the kobj_type release callback frees it (err1/err2).
 * Returns the record or an ERR_PTR.
 */
static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
{
	struct afu_config_record *cr;
	int rc;

	cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cr = cr_idx;

	rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
	if (rc)
		goto err;
	rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
	if (rc)
		goto err;
	rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
	if (rc)
		goto err;
	cr->class >>= 8;	/* strip the revision byte */

	/*
	 * Export raw AFU PCIe like config record. For now this is read only by
	 * root - we can expand that later to be readable by non-root and maybe
	 * even writable provided we have a good use-case. Once we support
	 * exposing AFUs through a virtual PHB they will get that for free from
	 * Linux' PCI infrastructure, but until then it's not clear that we
	 * need it for anything since the main use case is just identifying
	 * AFUs, which can be done via the vendor, device and class attributes.
	 */
	sysfs_bin_attr_init(&cr->config_attr);
	cr->config_attr.attr.name = "config";
	cr->config_attr.attr.mode = S_IRUSR;
	cr->config_attr.size = afu->crs_len;
	cr->config_attr.read = afu_read_config;

	rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
				  &afu->dev.kobj, "cr%i", cr->cr);
	if (rc)
		goto err1;

	rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
	if (rc)
		goto err1;

	rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
	if (rc)
		goto err2;

	return cr;
err2:
	sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
err1:
	kobject_put(&cr->kobj);
	return ERR_PTR(rc);
err:
	kfree(cr);
	return ERR_PTR(rc);
}
  532. void cxl_sysfs_afu_remove(struct cxl_afu *afu)
  533. {
  534. struct device_attribute *dev_attr;
  535. struct afu_config_record *cr, *tmp;
  536. int i;
  537. /* remove the err buffer bin attribute */
  538. if (afu->eb_len)
  539. device_remove_bin_file(&afu->dev, &afu->attr_eb);
  540. for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
  541. dev_attr = &afu_attrs[i];
  542. if (cxl_ops->support_attributes(dev_attr->attr.name,
  543. CXL_AFU_ATTRS))
  544. device_remove_file(&afu->dev, &afu_attrs[i]);
  545. }
  546. list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
  547. sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
  548. kobject_put(&cr->kobj);
  549. }
  550. }
/*
 * Create the AFU's sysfs files: the supported device attributes, the
 * optional error-buffer binary attribute (only when eb_len != 0), and
 * one cr<n>/ kobject per configuration record.
 *
 * Two distinct unwind paths: "err" removes only the device attributes
 * created so far (and clears eb_len so a later cxl_sysfs_afu_remove()
 * won't remove a bin file that was never created); "err1" delegates the
 * full teardown — including any records already added — to
 * cxl_sysfs_afu_remove().  Returns 0 or a negative errno.
 */
int cxl_sysfs_afu_add(struct cxl_afu *afu)
{
	struct device_attribute *dev_attr;
	struct afu_config_record *cr;
	int i, rc;

	INIT_LIST_HEAD(&afu->crs);

	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
		dev_attr = &afu_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_ATTRS)) {
			if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
				goto err;
		}
	}

	/* conditionally create the add the binary file for error info buffer */
	if (afu->eb_len) {
		sysfs_attr_init(&afu->attr_eb.attr);

		afu->attr_eb.attr.name = "afu_err_buff";
		afu->attr_eb.attr.mode = S_IRUGO;
		afu->attr_eb.size = afu->eb_len;
		afu->attr_eb.read = afu_eb_read;

		rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
		if (rc) {
			dev_err(&afu->dev,
				"Unable to create eb attr for the afu. Err(%d)\n",
				rc);
			goto err;
		}
	}

	for (i = 0; i < afu->crs_num; i++) {
		cr = cxl_sysfs_afu_new_cr(afu, i);
		if (IS_ERR(cr)) {
			rc = PTR_ERR(cr);
			goto err1;
		}
		list_add(&cr->list, &afu->crs);
	}

	return 0;

err1:
	cxl_sysfs_afu_remove(afu);
	return rc;
err:
	/* reset the eb_len as we havent created the bin attr */
	afu->eb_len = 0;

	for (i--; i >= 0; i--) {
		dev_attr = &afu_attrs[i];
		if (cxl_ops->support_attributes(dev_attr->attr.name,
						CXL_AFU_ATTRS))
			device_remove_file(&afu->dev, &afu_attrs[i]);
	}
	return rc;
}
  603. int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
  604. {
  605. struct device_attribute *dev_attr;
  606. int i, rc;
  607. for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
  608. dev_attr = &afu_master_attrs[i];
  609. if (cxl_ops->support_attributes(dev_attr->attr.name,
  610. CXL_AFU_MASTER_ATTRS)) {
  611. if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
  612. goto err;
  613. }
  614. }
  615. return 0;
  616. err:
  617. for (i--; i >= 0; i--) {
  618. dev_attr = &afu_master_attrs[i];
  619. if (cxl_ops->support_attributes(dev_attr->attr.name,
  620. CXL_AFU_MASTER_ATTRS))
  621. device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
  622. }
  623. return rc;
  624. }
  625. void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
  626. {
  627. struct device_attribute *dev_attr;
  628. int i;
  629. for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
  630. dev_attr = &afu_master_attrs[i];
  631. if (cxl_ops->support_attributes(dev_attr->attr.name,
  632. CXL_AFU_MASTER_ATTRS))
  633. device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
  634. }
  635. }