ctrlmondata.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"
/*
 * Check whether MBA bandwidth percentage value is correct. The value is
 * checked against the minimum and max bandwidth values specified by the
 * hardware. The allocated bandwidth percentage is rounded to the next
 * control step available on the hardware.
 */
static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
{
	unsigned long bw;
	int ret;

	/*
	 * Only linear delay values are supported for current Intel SKUs.
	 */
	if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
		rdt_last_cmd_puts("No support for non-linear MB domains\n");
		return false;
	}

	ret = kstrtoul(buf, 10, &bw);
	if (ret) {
		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
		return false;
	}

	if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
	    !is_mba_sc(r)) {
		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
				    r->membw.min_bw, r->default_ctrl);
		return false;
	}

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
	return true;
}
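
/*
 * Parse one memory bandwidth value (a decimal percentage) for a domain
 * and stage it for the group's CLOSID. When the MBA software controller
 * is active the value is stored directly as a bandwidth target instead
 * of being staged for the control MSR.
 */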
int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
	     struct rdt_domain *d)
{
	struct resctrl_staged_config *cfg;
	u32 closid = data->rdtgrp->closid;
	struct rdt_resource *r = s->res;
	unsigned long bw_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	if (!bw_validate(data->buf, &bw_val, r))
		return -EINVAL;

	if (is_mba_sc(r)) {
		d->mbps_val[closid] = bw_val;
		return 0;
	}

	cfg->new_ctrl = bw_val;
	cfg->have_new_ctrl = true;

	return 0;
}
/*
 * Check whether a cache bit mask is valid.
 * For Intel the SDM says:
 *	Please note that all (and only) contiguous '1' combinations
 *	are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
 * Additionally Haswell requires at least two bits set.
 * AMD allows non-contiguous bitmasks.
 */
static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
	unsigned long first_bit, zero_bit, val;
	unsigned int cbm_len = r->cache.cbm_len;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
		return false;
	}

	if ((!r->cache.arch_has_empty_bitmaps && val == 0) ||
	    val > r->default_ctrl) {
		rdt_last_cmd_puts("Mask out of range\n");
		return false;
	}

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Are non-contiguous bitmasks allowed? */
	if (!r->cache.arch_has_sparse_bitmaps &&
	    (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
		return false;
	}

	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
				    r->cache.min_cbm_bits);
		return false;
	}

	*data = val;
	return true;
}
/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
	      struct rdt_domain *d)
{
	struct rdtgroup *rdtgrp = data->rdtgrp;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 cbm_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	/*
	 * Cannot set up more than one pseudo-locked region in a cache
	 * hierarchy.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
		return -EINVAL;
	}

	if (!cbm_validate(data->buf, &cbm_val, r))
		return -EINVAL;

	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
		return -EINVAL;
	}

	/*
	 * The CBM may not overlap with the CBM of another closid if
	 * either is exclusive.
	 */
	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
		rdt_last_cmd_puts("Overlaps with exclusive group\n");
		return -EINVAL;
	}

	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			rdt_last_cmd_puts("Overlaps with other group\n");
			return -EINVAL;
		}
	}

	cfg->new_ctrl = cbm_val;
	cfg->have_new_ctrl = true;

	return 0;
}
/*
 * For each domain in this resource we expect to find a series of:
 *	id=mask
 * separated by ";". The "id" is in decimal, and must match one of
 * the "id"s for this resource.
 */
static int parse_line(char *line, struct resctrl_schema *s,
		      struct rdtgroup *rdtgrp)
{
	enum resctrl_conf_type t = s->conf_type;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	struct rdt_parse_data data;
	char *dom = NULL, *id;
	struct rdt_domain *d;
	unsigned long dom_id;

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    r->rid == RDT_RESOURCE_MBA) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
		return -EINVAL;
	}

next:
	if (!line || line[0] == '\0')
		return 0;
	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
		return -EINVAL;
	}
	dom = strim(dom);
	list_for_each_entry(d, &r->domains, list) {
		if (d->id == dom_id) {
			data.buf = dom;
			data.rdtgrp = rdtgrp;
			if (r->parse_ctrlval(&data, s, d))
				return -EINVAL;
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				cfg = &d->staged_config[t];
				/*
				 * We are in pseudo-locking setup mode and
				 * just parsed a valid CBM that should be
				 * pseudo-locked. Only one locked region per
				 * resource group and domain, so just do the
				 * required initialization for a single
				 * region and return.
				 */
				rdtgrp->plr->s = s;
				rdtgrp->plr->d = d;
				rdtgrp->plr->cbm = cfg->new_ctrl;
				d->plr = rdtgrp->plr;
				return 0;
			}
			goto next;
		}
	}
	return -EINVAL;
}
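
/*
 * Map a (closid, configuration type) pair to an index in the domain's
 * ctrl_val array. With CDP enabled each CLOSID occupies a pair of
 * adjacent entries: the even index holds the data mask and the odd
 * index holds the code mask.
 */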
static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
{
	switch (type) {
	default:
	case CDP_NONE:
		return closid;
	case CDP_CODE:
		return closid * 2 + 1;
	case CDP_DATA:
		return closid * 2;
	}
}
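
/*
 * Store a staged control value in the hardware domain's cached ctrl_val
 * array. If the value changed, mark one CPU of the domain in @cpu_mask
 * so the MSR write can be issued there, and return true.
 */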
static bool apply_config(struct rdt_hw_domain *hw_dom,
			 struct resctrl_staged_config *cfg, u32 idx,
			 cpumask_var_t cpu_mask)
{
	struct rdt_domain *dom = &hw_dom->d_resctrl;

	if (cfg->new_ctrl != hw_dom->ctrl_val[idx]) {
		cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
		hw_dom->ctrl_val[idx] = cfg->new_ctrl;

		return true;
	}

	return false;
}
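
/*
 * Write a single configuration value for one domain. The caller must be
 * running on a CPU that belongs to the domain, since the control MSR
 * is written locally via the resource's msr_update() callback.
 */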
int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type t, u32 cfg_val)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	u32 idx = get_config_index(closid, t);
	struct msr_param msr_param;

	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
		return -EINVAL;

	hw_dom->ctrl_val[idx] = cfg_val;

	msr_param.res = r;
	msr_param.low = idx;
	msr_param.high = idx + 1;
	hw_res->msr_update(d, &msr_param, r);

	return 0;
}
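
/*
 * Apply all staged configurations for @closid across every domain of
 * the resource, accumulating the range of changed indices, then have
 * one CPU per affected domain write the updated control MSRs.
 */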
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
{
	struct resctrl_staged_config *cfg;
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	enum resctrl_conf_type t;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;
	u32 idx;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = NULL;
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		for (t = 0; t < CDP_NUM_TYPES; t++) {
			cfg = &hw_dom->d_resctrl.staged_config[t];
			if (!cfg->have_new_ctrl)
				continue;

			idx = get_config_index(closid, t);
			if (!apply_config(hw_dom, cfg, idx, cpu_mask))
				continue;

			if (!msr_param.res) {
				msr_param.low = idx;
				msr_param.high = msr_param.low + 1;
				msr_param.res = r;
			} else {
				msr_param.low = min(msr_param.low, idx);
				msr_param.high = max(msr_param.high, idx + 1);
			}
		}
	}

	if (cpumask_empty(cpu_mask))
		goto done;

	cpu = get_cpu();
	/* Update the resource control MSR on this CPU if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update the resource control MSR on other CPUs. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

done:
	free_cpumask_var(cpu_mask);

	return 0;
}
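
/*
 * Find the schema named @resname and hand the rest of the line to
 * parse_line(), provided the group's CLOSID is valid for that schema.
 */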
static int rdtgroup_parse_resource(char *resname, char *tok,
				   struct rdtgroup *rdtgrp)
{
	struct resctrl_schema *s;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
			return parse_line(tok, s, rdtgrp);
	}
	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
	return -EINVAL;
}
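
/*
 * Handle a write to a "schemata" file. Each line has the form:
 *	resource:id=value;id=value;...
 * All lines are parsed and staged first; the staged configurations are
 * then applied to the hardware in a second pass.
 */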
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct resctrl_schema *s;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	char *tok, *resname;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	cpus_read_lock();
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		cpus_read_unlock();
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	/*
	 * No changes to pseudo-locked region allowed. It has to be removed
	 * and re-created instead.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
		goto out;
	}

	rdt_staged_configs_clear();

	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
		if (!tok) {
			rdt_last_cmd_puts("Missing ':'\n");
			ret = -EINVAL;
			goto out;
		}
		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);
			ret = -EINVAL;
			goto out;
		}
		ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
		if (ret)
			goto out;
	}

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;

		/*
		 * Writes to mba_sc resources update the software controller,
		 * not the control MSR.
		 */
		if (is_mba_sc(r))
			continue;

		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret)
			goto out;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * If pseudo-locking fails we keep the resource group in
		 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
		 * active and updated for just the domain the pseudo-locked
		 * region was requested for.
		 */
		ret = rdtgroup_pseudo_lock_create(rdtgrp);
	}

out:
	rdt_staged_configs_clear();
	rdtgroup_kn_unlock(of->kn);
	cpus_read_unlock();
	return ret ?: nbytes;
}
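
/*
 * Return the currently programmed control value for a (domain, closid,
 * configuration type) tuple from the cached ctrl_val array.
 */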
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type type)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	u32 idx = get_config_index(closid, type);

	return hw_dom->ctrl_val[idx];
}
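
/*
 * Print one schemata line: the schema name followed by an "id=value"
 * entry for every domain, separated by ";".
 */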
static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
{
	struct rdt_resource *r = schema->res;
	struct rdt_domain *dom;
	bool sep = false;
	u32 ctrl_val;

	seq_printf(s, "%*s:", max_name_width, schema->name);
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_puts(s, ";");

		if (is_mba_sc(r))
			ctrl_val = dom->mbps_val[closid];
		else
			ctrl_val = resctrl_arch_get_config(r, dom, closid,
							   schema->conf_type);

		seq_printf(s, r->format_str, dom->id, max_data_width,
			   ctrl_val);
		sep = true;
	}
	seq_puts(s, "\n");
}
int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	struct rdtgroup *rdtgrp;
	int ret = 0;
	u32 closid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				seq_printf(s, "%s:uninitialized\n", schema->name);
			}
		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				seq_printf(s, "%s:%d=%x\n",
					   rdtgrp->plr->s->res->name,
					   rdtgrp->plr->d->id,
					   rdtgrp->plr->cbm);
			}
		} else {
			closid = rdtgrp->closid;
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				if (closid < schema->num_closid)
					show_doms(s, schema, closid);
			}
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);
	return ret;
}
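
/*
 * Read one monitoring event for a domain. The counter must be read on
 * a CPU within the domain, so the actual read runs in mon_event_count()
 * on one such CPU via smp_call_function_any().
 */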
void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_domain *d, struct rdtgroup *rdtgrp,
		    int evtid, int first)
{
	/* Set up the parameters to send to the IPI to read the data. */
	rr->rgrp = rdtgrp;
	rr->evtid = evtid;
	rr->r = r;
	rr->d = d;
	rr->val = 0;
	rr->first = first;

	smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
}
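
/*
 * Show the current value of a monitoring event file. The resource,
 * domain and event ids are recovered from the kernfs node's private
 * data, then the counter is read and printed, or an error reported.
 */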
int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	u32 resid, evtid, domid;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	union mon_data_bits md;
	struct rdt_domain *d;
	struct rmid_read rr;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto out;
	}

	md.priv = of->kn->priv;
	resid = md.u.rid;
	domid = md.u.domid;
	evtid = md.u.evtid;

	r = &rdt_resources_all[resid].r_resctrl;
	d = rdt_find_domain(r, domid, NULL);
	if (IS_ERR_OR_NULL(d)) {
		ret = -ENOENT;
		goto out;
	}

	mon_event_read(&rr, r, d, rdtgrp, evtid, false);

	if (rr.err == -EIO)
		seq_puts(m, "Error\n");
	else if (rr.err == -EINVAL)
		seq_puts(m, "Unavailable\n");
	else
		seq_printf(m, "%llu\n", rr.val);

out:
	rdtgroup_kn_unlock(of->kn);
	return ret;
}