// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};
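
/*
 * The attributes below are exposed through sysfs on the dsa bus. An
 * illustrative (not exhaustive) configuration sequence from user space,
 * assuming a device enumerated as dsa0 with work queue wq0.0, might be:
 *
 *	echo 0         > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo 16        > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *	echo user      > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo app0      > /sys/bus/dsa/devices/dsa0/wq0.0/name
 *
 * Actual device and wq names depend on platform enumeration.
 */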
/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

/* Group attributes */
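/*
 * Recompute the number of unreserved read buffers after a group's
 * reservation changes: total device read buffers minus the sum reserved
 * across all groups.
 */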
static void idxd_set_free_rdbufs(struct idxd_device *idxd)
{
	int i, rdbufs;

	for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		rdbufs += g->rdbufs_reserved;
	}

	idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
}

static ssize_t group_read_buffers_reserved_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_reserved);
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_show(dev, attr, buf);
}

static ssize_t group_read_buffers_reserved_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_rdbufs)
		return -EINVAL;

	if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
		return -EINVAL;

	group->rdbufs_reserved = val;
	idxd_set_free_rdbufs(idxd);
	return count;
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static struct device_attribute dev_attr_group_read_buffers_reserved =
		__ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show,
		       group_read_buffers_reserved_store);
static ssize_t group_read_buffers_allowed_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_allowed);
}

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_show(dev, attr, buf);
}

static ssize_t group_read_buffers_allowed_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->rdbufs_reserved + idxd->nr_rdbufs)
		return -EINVAL;

	group->rdbufs_allowed = val;
	return count;
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static struct device_attribute dev_attr_group_read_buffers_allowed =
		__ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show,
		       group_read_buffers_allowed_store);

static ssize_t group_use_read_buffer_limit_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit);
}

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_show(dev, attr, buf);
}

static ssize_t group_use_read_buffer_limit_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->rdbuf_limit == 0)
		return -EPERM;

	group->use_rdbuf_limit = !!val;
	return count;
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static struct device_attribute dev_attr_group_use_read_buffer_limit =
		__ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show,
		       group_use_read_buffer_limit_store);
static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);
static ssize_t group_desc_progress_limit_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->desc_progress_limit);
}

static ssize_t group_desc_progress_limit_store(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	int val, rc;

	rc = kstrtoint(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (val & ~GENMASK(1, 0))
		return -EINVAL;

	group->desc_progress_limit = val;
	return count;
}

static struct device_attribute dev_attr_group_desc_progress_limit =
		__ATTR(desc_progress_limit, 0644, group_desc_progress_limit_show,
		       group_desc_progress_limit_store);

static ssize_t group_batch_progress_limit_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->batch_progress_limit);
}

static ssize_t group_batch_progress_limit_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	int val, rc;

	rc = kstrtoint(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (val & ~GENMASK(1, 0))
		return -EINVAL;

	group->batch_progress_limit = val;
	return count;
}

static struct device_attribute dev_attr_group_batch_progress_limit =
		__ATTR(batch_progress_limit, 0644, group_batch_progress_limit_show,
		       group_batch_progress_limit_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_use_read_buffer_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_read_buffers_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_read_buffers_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	&dev_attr_group_desc_progress_limit.attr,
	&dev_attr_group_batch_progress_limit.attr,
	NULL,
};
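
/*
 * Hide the descriptor/batch progress limit attributes on hardware that does
 * not advertise progress limit support in its group capability register.
 */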
static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr,
						     struct idxd_device *idxd)
{
	return (attr == &dev_attr_group_desc_progress_limit.attr ||
		attr == &dev_attr_group_batch_progress_limit.attr) &&
		!idxd->hw.group_cap.progress_limit;
}

static bool idxd_group_attr_read_buffers_invisible(struct attribute *attr,
						   struct idxd_device *idxd)
{
	/*
	 * Intel IAA does not support Read Buffer allocation control,
	 * make these attributes invisible.
	 */
	return (attr == &dev_attr_group_use_token_limit.attr ||
		attr == &dev_attr_group_use_read_buffer_limit.attr ||
		attr == &dev_attr_group_tokens_allowed.attr ||
		attr == &dev_attr_group_read_buffers_allowed.attr ||
		attr == &dev_attr_group_tokens_reserved.attr ||
		attr == &dev_attr_group_read_buffers_reserved.attr) &&
		idxd->data->type == IDXD_TYPE_IAX;
}

static umode_t idxd_group_attr_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;

	if (idxd_group_attr_progress_limit_invisible(attr, idxd))
		return 0;

	if (idxd_group_attr_read_buffers_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
	.is_visible = idxd_group_attr_visible,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};
/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared")) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}
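
/*
 * Sum the queue sizes currently claimed by all work queues. Used below to
 * reject a new wq size that would oversubscribe the device's total WQ space.
 */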
static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);
static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!idxd->hw.gen_cap.block_on_fault)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}
static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	char *input, *pos;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	input = kstrndup(buf, count, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	sprintf(wq->name, "%s", pos);
	kfree(input);
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
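
/*
 * Parse a u64 from sysfs input, reject zero, and round the value up to the
 * next power of two. Used for the per-wq transfer and batch size limits.
 */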
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size);

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags));
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	if (ats_dis)
		set_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
	else
		clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);

static ssize_t wq_enqcmds_retries_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", wq->enqcmds_retries);
}

static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int rc;
	unsigned int retries;

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	rc = kstrtouint(buf, 10, &retries);
	if (rc < 0)
		return rc;

	if (retries > IDXD_ENQCMDS_MAX_RETRIES)
		retries = IDXD_ENQCMDS_MAX_RETRIES;

	wq->enqcmds_retries = retries;
	return count;
}

static struct device_attribute dev_attr_wq_enqcmds_retries =
		__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
static ssize_t wq_op_config_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
}

static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
{
	int bit;

	/*
	 * The OPCAP is defined as 256 bits that represents each operation the device
	 * supports per bit. Iterate through all the bits and check if the input mask
	 * is set for bits that are not set in the OPCAP for the device. If no OPCAP
	 * bit is set and input mask has the bit set, then return error.
	 */
	for_each_set_bit(bit, opmask, IDXD_MAX_OPCAP_BITS) {
		if (!test_bit(bit, idxd->opcap_bmap))
			return -EINVAL;
	}

	return 0;
}

static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned long *opmask;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	opmask = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
	if (!opmask)
		return -ENOMEM;

	rc = bitmap_parse(buf, count, opmask, IDXD_MAX_OPCAP_BITS);
	if (rc < 0)
		goto err;

	rc = idxd_verify_supported_opcap(idxd, opmask);
	if (rc < 0)
		goto err;

	bitmap_copy(wq->opcap_bmap, opmask, IDXD_MAX_OPCAP_BITS);

	bitmap_free(opmask);
	return count;

err:
	bitmap_free(opmask);
	return rc;
}

static struct device_attribute dev_attr_wq_op_config =
		__ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_occupancy.attr,
	&dev_attr_wq_enqcmds_retries.attr,
	&dev_attr_wq_op_config.attr,
	NULL,
};
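
/*
 * Per-wq attribute visibility: op_config is only exposed when the hardware
 * reports WQ operation config support, and max_batch_size is hidden on IAA.
 */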
static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
					     struct idxd_device *idxd)
{
	return attr == &dev_attr_wq_op_config.attr &&
	       !idxd->hw.wq_cap.op_config;
}

static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
						  struct idxd_device *idxd)
{
	/* Intel IAA does not support batch processing, make it invisible */
	return attr == &dev_attr_wq_max_batch_size.attr &&
	       idxd->data->type == IDXD_TYPE_IAX;
}

static umode_t idxd_wq_attr_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (idxd_wq_attr_op_config_invisible(attr, idxd))
		return 0;

	if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
	.is_visible = idxd_wq_attr_visible,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	bitmap_free(wq->opcap_bmap);
	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};
/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);
static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int count = 0, i;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock(&idxd->dev_lock);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, out = 0;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock(&idxd->dev_lock);
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_read_buffers_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_rdbufs);
}

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n");
	return max_read_buffers_show(dev, attr, buf);
}

static DEVICE_ATTR_RO(max_tokens);	/* deprecated */
static DEVICE_ATTR_RO(max_read_buffers);
static ssize_t read_buffer_limit_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit);
}

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
	return read_buffer_limit_show(dev, attr, buf);
}

static ssize_t read_buffer_limit_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.rdbuf_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_rdbufs)
		return -EINVAL;

	idxd->rdbuf_limit = val;
	return count;
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit\n");
	return read_buffer_limit_store(dev, attr, buf, count);
}

static DEVICE_ATTR_RW(token_limit);	/* deprecated */
static DEVICE_ATTR_RW(read_buffer_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}

static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	idxd->cmd_status = 0;
	return count;
}
static DEVICE_ATTR_RW(cmd_status);
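
/*
 * Device-level attribute visibility: IAA devices hide max_batch_size and the
 * read buffer (token) attributes, since those capabilities do not apply.
 */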
static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr,
						      struct idxd_device *idxd)
{
	/* Intel IAA does not support batch processing, make it invisible */
	return attr == &dev_attr_max_batch_size.attr &&
	       idxd->data->type == IDXD_TYPE_IAX;
}

static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr,
						    struct idxd_device *idxd)
{
	/*
	 * Intel IAA does not support Read Buffer allocation control,
	 * make these attributes invisible.
	 */
	return (attr == &dev_attr_max_tokens.attr ||
		attr == &dev_attr_max_read_buffers.attr ||
		attr == &dev_attr_token_limit.attr ||
		attr == &dev_attr_read_buffer_limit.attr) &&
		idxd->data->type == IDXD_TYPE_IAX;
}

static umode_t idxd_device_attr_visible(struct kobject *kobj,
					struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_device *idxd = confdev_to_idxd(dev);

	if (idxd_device_attr_max_batch_size_invisible(attr, idxd))
		return 0;

	if (idxd_device_attr_read_buffers_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_max_read_buffers.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_read_buffer_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
	.is_visible = idxd_device_attr_visible,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	bitmap_free(idxd->wq_enable_map);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	ida_free(&idxd_ida, idxd->id);
	bitmap_free(idxd->opcap_bmap);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};
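
/*
 * Error unwinding in the registration helpers below follows the usual device
 * model rule: devices that were never added are released with put_device(),
 * while devices that were successfully added are torn down with
 * device_unregister().
 */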
static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		rc = device_add(engine_confdev(engine));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		put_device(engine_confdev(engine));
	}

	while (j--) {
		engine = idxd->engines[j];
		device_unregister(engine_confdev(engine));
	}
	return rc;
}

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		rc = device_add(group_confdev(group));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}

	while (j--) {
		group = idxd->groups[j];
		device_unregister(group_confdev(group));
	}
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		rc = device_add(wq_confdev(wq));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		put_device(wq_confdev(wq));
	}

	while (j--) {
		wq = idxd->wqs[j];
		device_unregister(wq_confdev(wq));
	}
	return rc;
}
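
/*
 * Register the parent idxd device first, then its wq, engine and group child
 * devices; on failure the already-registered children are unregistered in
 * reverse order before the parent is deleted.
 */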
int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}
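
/*
 * Tear down all child wq, engine and group devices; the parent idxd device
 * itself is not unregistered here.
 */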
void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}