perf_pai_crypto.c 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Performance event support - Processor Activity Instrumentation Facility
  4. *
  5. * Copyright IBM Corp. 2022
  6. * Author(s): Thomas Richter <[email protected]>
  7. */
  8. #define KMSG_COMPONENT "pai_crypto"
  9. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  10. #include <linux/kernel.h>
  11. #include <linux/kernel_stat.h>
  12. #include <linux/percpu.h>
  13. #include <linux/notifier.h>
  14. #include <linux/init.h>
  15. #include <linux/export.h>
  16. #include <linux/io.h>
  17. #include <linux/perf_event.h>
  18. #include <asm/ctl_reg.h>
  19. #include <asm/pai.h>
  20. #include <asm/debug.h>
/* s390dbf debug area used for this PMU's trace messages */
static debug_info_t *cfm_dbg;
/* Size of the mapped counter sets, extracted with QPACI instruction */
static unsigned int paicrypt_cnt;

/* Static key: enabled while at least one PAI crypto event exists */
DEFINE_STATIC_KEY_FALSE(pai_key);

/* One entry of the raw-sample buffer: counter number and its value.
 * Packed because the layout is exported to user space as raw sample data.
 */
struct pai_userdata {
	u16 num;
	u64 value;
} __packed;

/* Per-CPU state for the PAI crypto counters */
struct paicrypt_map {
	unsigned long *page;	/* Page for CPU to store counters */
	struct pai_userdata *save; /* Page to store no-zero counters */
	unsigned int users;	/* # of PAI crypto users */
	unsigned int sampler;	/* # of PAI crypto samplers */
	unsigned int counter;	/* # of PAI crypto counters */
	struct perf_event *event; /* Perf event for sampling */
};

static DEFINE_PER_CPU(struct paicrypt_map, paicrypt_map);

/* Serializes reservation and release of the per-CPU counter page and
 * the sampler/counter reference counts.
 */
static DEFINE_MUTEX(pai_reserve_mutex);
/* Adjust usage counters and remove allocated memory when all users are
 * gone.
 */
static void paicrypt_event_destroy(struct perf_event *event)
{
	struct paicrypt_map *cpump = per_cpu_ptr(&paicrypt_map, event->cpu);

	cpump->event = NULL;
	static_branch_dec(&pai_key);
	mutex_lock(&pai_reserve_mutex);
	/* sample_period distinguishes sampling events from counting
	 * events; the two flavors are reference counted separately.
	 */
	if (event->attr.sample_period)
		cpump->sampler -= 1;
	else
		cpump->counter -= 1;
	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d"
			    " sampler %d counter %d\n", __func__,
			    event->attr.config, event->cpu, cpump->sampler,
			    cpump->counter);
	/* Last user on this CPU gone: release the counter page and the
	 * raw-sample extraction buffer allocated in paicrypt_busy().
	 */
	if (!cpump->counter && !cpump->sampler) {
		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
				    __func__, (unsigned long)cpump->page,
				    cpump->save);
		free_page((unsigned long)cpump->page);
		cpump->page = NULL;
		kvfree(cpump->save);
		cpump->save = NULL;
	}
	mutex_unlock(&pai_reserve_mutex);
}
  68. static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel)
  69. {
  70. if (kernel)
  71. nr += PAI_CRYPTO_MAXCTR;
  72. return cpump->page[nr];
  73. }
  74. /* Read the counter values. Return value from location in CMP. For event
  75. * CRYPTO_ALL sum up all events.
  76. */
  77. static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
  78. {
  79. struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
  80. u64 sum = 0;
  81. int i;
  82. if (event->attr.config != PAI_CRYPTO_BASE) {
  83. return paicrypt_getctr(cpump,
  84. event->attr.config - PAI_CRYPTO_BASE,
  85. kernel);
  86. }
  87. for (i = 1; i <= paicrypt_cnt; i++) {
  88. u64 val = paicrypt_getctr(cpump, i, kernel);
  89. if (!val)
  90. continue;
  91. sum += val;
  92. }
  93. return sum;
  94. }
  95. static u64 paicrypt_getall(struct perf_event *event)
  96. {
  97. u64 sum = 0;
  98. if (!event->attr.exclude_kernel)
  99. sum += paicrypt_getdata(event, true);
  100. if (!event->attr.exclude_user)
  101. sum += paicrypt_getdata(event, false);
  102. return sum;
  103. }
/* Used to avoid races in checking concurrent access of counting and
 * sampling for crypto events
 *
 * Only one instance of event pai_crypto/CRYPTO_ALL/ for sampling is
 * allowed and when this event is running, no counting event is allowed.
 * Several counting events are allowed in parallel, but no sampling event
 * is allowed while one (or more) counting events are running.
 *
 * This function is called in process context and it is safe to block.
 * When the event initialization functions fails, no other call back will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
{
	unsigned int *use_ptr;
	int rc = 0;

	mutex_lock(&pai_reserve_mutex);
	if (a->sample_period) {		/* Sampling requested */
		use_ptr = &cpump->sampler;
		if (cpump->counter || cpump->sampler)
			rc = -EBUSY;	/* ... sampling/counting active */
	} else {			/* Counting requested */
		use_ptr = &cpump->counter;
		if (cpump->sampler)
			rc = -EBUSY;	/* ... and sampling active */
	}
	if (rc)
		goto unlock;
	/* Allocate memory for counter page and counter extraction.
	 * Only the first counting event has to allocate a page.
	 */
	if (cpump->page)		/* Page already allocated, reuse it */
		goto unlock;
	rc = -ENOMEM;
	cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
	if (!cpump->page)
		goto unlock;
	/* Buffer to extract the non-zero counters for raw samples; one
	 * entry per counter plus one for counter number zero.
	 */
	cpump->save = kvmalloc_array(paicrypt_cnt + 1,
				     sizeof(struct pai_userdata), GFP_KERNEL);
	if (!cpump->save) {
		free_page((unsigned long)cpump->page);
		cpump->page = NULL;
		goto unlock;
	}
	rc = 0;
unlock:
	/* If rc is non-zero, do not increment counter/sampler. */
	if (!rc)
		*use_ptr += 1;
	debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx sampler %d"
			    " counter %d page %#lx save %p rc %d\n", __func__,
			    a->sample_period, cpump->sampler, cpump->counter,
			    (unsigned long)cpump->page, cpump->save, rc);
	mutex_unlock(&pai_reserve_mutex);
	return rc;
}
/* Might be called on different CPU than the one the event is intended for. */
static int paicrypt_event_init(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	struct paicrypt_map *cpump;
	int rc;

	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
		return -ENOENT;
	/* PAI crypto event must be in valid range */
	if (a->config < PAI_CRYPTO_BASE ||
	    a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
		return -EINVAL;
	/* Allow only CPU wide operation, no process context for now. */
	if (event->hw.target || event->cpu == -1)
		return -ENOENT;
	/* Allow only CRYPTO_ALL for sampling. */
	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
		return -EINVAL;

	/* Reserve the per-CPU counter page and bump the user counts */
	cpump = per_cpu_ptr(&paicrypt_map, event->cpu);
	rc = paicrypt_busy(a, cpump);
	if (rc)
		return rc;

	/* Event initialization sets last_tag to 0. When later on the events
	 * are deleted and re-added, do not reset the event count value to zero.
	 * Events are added, deleted and re-added when 2 or more events
	 * are active at the same time.
	 */
	event->hw.last_tag = 0;
	cpump->event = event;
	event->destroy = paicrypt_event_destroy;

	if (a->sample_period) {
		/* Force a fixed period of one; frequency mode is not used */
		a->sample_period = 1;
		a->freq = 0;
		/* Register for paicrypt_sched_task() to be called */
		event->attach_state |= PERF_ATTACH_SCHED_CB;
		/* Add raw data which contain the memory mapped counters */
		a->sample_type |= PERF_SAMPLE_RAW;
		/* Turn off inheritance */
		a->inherit = 0;
	}

	static_branch_inc(&pai_key);
	return 0;
}
  206. static void paicrypt_read(struct perf_event *event)
  207. {
  208. u64 prev, new, delta;
  209. prev = local64_read(&event->hw.prev_count);
  210. new = paicrypt_getall(event);
  211. local64_set(&event->hw.prev_count, new);
  212. delta = (prev <= new) ? new - prev
  213. : (-1ULL - prev) + new + 1; /* overflow */
  214. local64_add(delta, &event->count);
  215. }
  216. static void paicrypt_start(struct perf_event *event, int flags)
  217. {
  218. u64 sum;
  219. if (!event->hw.last_tag) {
  220. event->hw.last_tag = 1;
  221. sum = paicrypt_getall(event); /* Get current value */
  222. local64_set(&event->count, 0);
  223. local64_set(&event->hw.prev_count, sum);
  224. }
  225. }
static int paicrypt_add(struct perf_event *event, int flags)
{
	struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
	unsigned long ccd;

	/* First active event on this CPU: publish the counter page address
	 * in the lowcore ccd field and set control register 0 bit 50 to
	 * enable the instrumentation.
	 */
	if (cpump->users++ == 0) {
		ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
		WRITE_ONCE(S390_lowcore.ccd, ccd);
		__ctl_set_bit(0, 50);
	}
	cpump->event = event;
	if (flags & PERF_EF_START && !event->attr.sample_period) {
		/* Only counting needs initial counter value */
		paicrypt_start(event, PERF_EF_RELOAD);
	}
	event->hw.state = 0;
	/* Sampling events need the context-switch callback to push samples */
	if (event->attr.sample_period)
		perf_sched_cb_inc(event->pmu);
	return 0;
}
static void paicrypt_stop(struct perf_event *event, int flags)
{
	/* Fold the outstanding counter delta into event->count before
	 * marking the event stopped.
	 */
	paicrypt_read(event);
	event->hw.state = PERF_HES_STOPPED;
}
static void paicrypt_del(struct perf_event *event, int flags)
{
	struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);

	if (event->attr.sample_period)
		perf_sched_cb_dec(event->pmu);
	if (!event->attr.sample_period)
		/* Only counting needs to read counter */
		paicrypt_stop(event, PERF_EF_UPDATE);
	/* Last event removed on this CPU: disable the instrumentation by
	 * clearing control register 0 bit 50 and resetting the lowcore
	 * ccd field.
	 */
	if (cpump->users-- == 1) {
		__ctl_clear_bit(0, 50);
		WRITE_ONCE(S390_lowcore.ccd, 0);
	}
}
  263. /* Create raw data and save it in buffer. Returns number of bytes copied.
  264. * Saves only positive counter entries of the form
  265. * 2 bytes: Number of counter
  266. * 8 bytes: Value of counter
  267. */
  268. static size_t paicrypt_copy(struct pai_userdata *userdata,
  269. struct paicrypt_map *cpump,
  270. bool exclude_user, bool exclude_kernel)
  271. {
  272. int i, outidx = 0;
  273. for (i = 1; i <= paicrypt_cnt; i++) {
  274. u64 val = 0;
  275. if (!exclude_kernel)
  276. val += paicrypt_getctr(cpump, i, true);
  277. if (!exclude_user)
  278. val += paicrypt_getctr(cpump, i, false);
  279. if (val) {
  280. userdata[outidx].num = i;
  281. userdata[outidx].value = val;
  282. outidx++;
  283. }
  284. }
  285. return outidx * sizeof(struct pai_userdata);
  286. }
/* Extract the non-zero counters into the save buffer and emit one perf
 * sample carrying them as raw data for the installed sampling event.
 * Returns the overflow indication of perf_event_overflow(), or 0 when
 * no event is active or no counter incremented.
 */
static int paicrypt_push_sample(void)
{
	struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
	struct perf_event *event = cpump->event;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	size_t rawsize;
	int overflow;

	if (!cpump->event)		/* No event active */
		return 0;
	rawsize = paicrypt_copy(cpump->save, cpump,
				cpump->event->attr.exclude_user,
				cpump->event->attr.exclude_kernel);
	if (!rawsize)			/* No incremented counters */
		return 0;

	/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));
	memset(&data, 0, sizeof(data));
	perf_sample_data_init(&data, 0, event->hw.last_period);
	/* Fill in only the sample fields the event actually requested */
	if (event->attr.sample_type & PERF_SAMPLE_TID) {
		data.tid_entry.pid = task_tgid_nr(current);
		data.tid_entry.tid = task_pid_nr(current);
	}
	if (event->attr.sample_type & PERF_SAMPLE_TIME)
		data.time = event->clock();
	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data.id = event->id;
	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
		data.cpu_entry.cpu = smp_processor_id();
		data.cpu_entry.reserved = 0;
	}
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = rawsize;
		raw.frag.data = cpump->save;
		raw.size = raw.frag.size;
		data.raw = &raw;
		data.sample_flags |= PERF_SAMPLE_RAW;
	}

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Clear lowcore page after read */
	memset(cpump->page, 0, PAGE_SIZE);
	return overflow;
}
  333. /* Called on schedule-in and schedule-out. No access to event structure,
  334. * but for sampling only event CRYPTO_ALL is allowed.
  335. */
  336. static void paicrypt_sched_task(struct perf_event_context *ctx, bool sched_in)
  337. {
  338. /* We started with a clean page on event installation. So read out
  339. * results on schedule_out and if page was dirty, clear values.
  340. */
  341. if (!sched_in)
  342. paicrypt_push_sample();
  343. }
/* Attribute definitions for paicrypt interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instructions returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported, there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1000 + offset in mapped kernel page.
 * All CPU Measurement Facility counters identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
PMU_FORMAT_ATTR(event, "config:0-63");

/* Format attributes: the raw event number occupies all 64 config bits */
static struct attribute *paicrypt_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paicrypt_events_group = {
	.name = "events",
	.attrs = NULL			/* Filled in attr_event_init() */
};

static struct attribute_group paicrypt_format_group = {
	.name = "format",
	.attrs = paicrypt_format_attr,
};

static const struct attribute_group *paicrypt_attr_groups[] = {
	&paicrypt_events_group,
	&paicrypt_format_group,
	NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paicrypt = {
	.task_ctx_nr = perf_invalid_context,
	.event_init = paicrypt_event_init,
	.add = paicrypt_add,
	.del = paicrypt_del,
	.start = paicrypt_start,
	.stop = paicrypt_stop,
	.read = paicrypt_read,
	.sched_task = paicrypt_sched_task,
	.attr_groups = paicrypt_attr_groups
};
/* List of symbolic PAI counter names. The array index matches the
 * counter's offset in the mapped counter page; index 0 (CRYPTO_ALL) is
 * the sum of all counters and the only event usable for sampling.
 */
static const char * const paicrypt_ctrnames[] = {
	[0] = "CRYPTO_ALL",
	[1] = "KM_DEA",
	[2] = "KM_TDEA_128",
	[3] = "KM_TDEA_192",
	[4] = "KM_ENCRYPTED_DEA",
	[5] = "KM_ENCRYPTED_TDEA_128",
	[6] = "KM_ENCRYPTED_TDEA_192",
	[7] = "KM_AES_128",
	[8] = "KM_AES_192",
	[9] = "KM_AES_256",
	[10] = "KM_ENCRYPTED_AES_128",
	[11] = "KM_ENCRYPTED_AES_192",
	[12] = "KM_ENCRYPTED_AES_256",
	[13] = "KM_XTS_AES_128",
	[14] = "KM_XTS_AES_256",
	[15] = "KM_XTS_ENCRYPTED_AES_128",
	[16] = "KM_XTS_ENCRYPTED_AES_256",
	[17] = "KMC_DEA",
	[18] = "KMC_TDEA_128",
	[19] = "KMC_TDEA_192",
	[20] = "KMC_ENCRYPTED_DEA",
	[21] = "KMC_ENCRYPTED_TDEA_128",
	[22] = "KMC_ENCRYPTED_TDEA_192",
	[23] = "KMC_AES_128",
	[24] = "KMC_AES_192",
	[25] = "KMC_AES_256",
	[26] = "KMC_ENCRYPTED_AES_128",
	[27] = "KMC_ENCRYPTED_AES_192",
	[28] = "KMC_ENCRYPTED_AES_256",
	[29] = "KMC_PRNG",
	[30] = "KMA_GCM_AES_128",
	[31] = "KMA_GCM_AES_192",
	[32] = "KMA_GCM_AES_256",
	[33] = "KMA_GCM_ENCRYPTED_AES_128",
	[34] = "KMA_GCM_ENCRYPTED_AES_192",
	[35] = "KMA_GCM_ENCRYPTED_AES_256",
	[36] = "KMF_DEA",
	[37] = "KMF_TDEA_128",
	[38] = "KMF_TDEA_192",
	[39] = "KMF_ENCRYPTED_DEA",
	[40] = "KMF_ENCRYPTED_TDEA_128",
	[41] = "KMF_ENCRYPTED_TDEA_192",
	[42] = "KMF_AES_128",
	[43] = "KMF_AES_192",
	[44] = "KMF_AES_256",
	[45] = "KMF_ENCRYPTED_AES_128",
	[46] = "KMF_ENCRYPTED_AES_192",
	[47] = "KMF_ENCRYPTED_AES_256",
	[48] = "KMCTR_DEA",
	[49] = "KMCTR_TDEA_128",
	[50] = "KMCTR_TDEA_192",
	[51] = "KMCTR_ENCRYPTED_DEA",
	[52] = "KMCTR_ENCRYPTED_TDEA_128",
	[53] = "KMCTR_ENCRYPTED_TDEA_192",
	[54] = "KMCTR_AES_128",
	[55] = "KMCTR_AES_192",
	[56] = "KMCTR_AES_256",
	[57] = "KMCTR_ENCRYPTED_AES_128",
	[58] = "KMCTR_ENCRYPTED_AES_192",
	[59] = "KMCTR_ENCRYPTED_AES_256",
	[60] = "KMO_DEA",
	[61] = "KMO_TDEA_128",
	[62] = "KMO_TDEA_192",
	[63] = "KMO_ENCRYPTED_DEA",
	[64] = "KMO_ENCRYPTED_TDEA_128",
	[65] = "KMO_ENCRYPTED_TDEA_192",
	[66] = "KMO_AES_128",
	[67] = "KMO_AES_192",
	[68] = "KMO_AES_256",
	[69] = "KMO_ENCRYPTED_AES_128",
	[70] = "KMO_ENCRYPTED_AES_192",
	[71] = "KMO_ENCRYPTED_AES_256",
	[72] = "KIMD_SHA_1",
	[73] = "KIMD_SHA_256",
	[74] = "KIMD_SHA_512",
	[75] = "KIMD_SHA3_224",
	[76] = "KIMD_SHA3_256",
	[77] = "KIMD_SHA3_384",
	[78] = "KIMD_SHA3_512",
	[79] = "KIMD_SHAKE_128",
	[80] = "KIMD_SHAKE_256",
	[81] = "KIMD_GHASH",
	[82] = "KLMD_SHA_1",
	[83] = "KLMD_SHA_256",
	[84] = "KLMD_SHA_512",
	[85] = "KLMD_SHA3_224",
	[86] = "KLMD_SHA3_256",
	[87] = "KLMD_SHA3_384",
	[88] = "KLMD_SHA3_512",
	[89] = "KLMD_SHAKE_128",
	[90] = "KLMD_SHAKE_256",
	[91] = "KMAC_DEA",
	[92] = "KMAC_TDEA_128",
	[93] = "KMAC_TDEA_192",
	[94] = "KMAC_ENCRYPTED_DEA",
	[95] = "KMAC_ENCRYPTED_TDEA_128",
	[96] = "KMAC_ENCRYPTED_TDEA_192",
	[97] = "KMAC_AES_128",
	[98] = "KMAC_AES_192",
	[99] = "KMAC_AES_256",
	[100] = "KMAC_ENCRYPTED_AES_128",
	[101] = "KMAC_ENCRYPTED_AES_192",
	[102] = "KMAC_ENCRYPTED_AES_256",
	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
	[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
	[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
	[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
	[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
	[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
	[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
	[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
	[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
	[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
	[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
	[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
	[119] = "PCC_SCALAR_MULTIPLY_P256",
	[120] = "PCC_SCALAR_MULTIPLY_P384",
	[121] = "PCC_SCALAR_MULTIPLY_P521",
	[122] = "PCC_SCALAR_MULTIPLY_ED25519",
	[123] = "PCC_SCALAR_MULTIPLY_ED448",
	[124] = "PCC_SCALAR_MULTIPLY_X25519",
	[125] = "PCC_SCALAR_MULTIPLY_X448",
	[126] = "PRNO_SHA_512_DRNG",
	[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
	[128] = "PRNO_TRNG",
	[129] = "KDSA_ECDSA_VERIFY_P256",
	[130] = "KDSA_ECDSA_VERIFY_P384",
	[131] = "KDSA_ECDSA_VERIFY_P521",
	[132] = "KDSA_ECDSA_SIGN_P256",
	[133] = "KDSA_ECDSA_SIGN_P384",
	[134] = "KDSA_ECDSA_SIGN_P521",
	[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
	[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
	[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
	[138] = "KDSA_EDDSA_VERIFY_ED25519",
	[139] = "KDSA_EDDSA_VERIFY_ED448",
	[140] = "KDSA_EDDSA_SIGN_ED25519",
	[141] = "KDSA_EDDSA_SIGN_ED448",
	[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
	[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
	[144] = "PCKMO_ENCRYPT_DEA_KEY",
	[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
	[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
	[147] = "PCKMO_ENCRYPT_AES_128_KEY",
	[148] = "PCKMO_ENCRYPT_AES_192_KEY",
	[149] = "PCKMO_ENCRYPT_AES_256_KEY",
	[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
	[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
	[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
	[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
	[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
	[155] = "IBM_RESERVED_155",
	[156] = "IBM_RESERVED_156",
};
  550. static void __init attr_event_free(struct attribute **attrs, int num)
  551. {
  552. struct perf_pmu_events_attr *pa;
  553. int i;
  554. for (i = 0; i < num; i++) {
  555. struct device_attribute *dap;
  556. dap = container_of(attrs[i], struct device_attribute, attr);
  557. pa = container_of(dap, struct perf_pmu_events_attr, attr);
  558. kfree(pa);
  559. }
  560. kfree(attrs);
  561. }
/* Allocate and initialize the sysfs event attribute for counter @num
 * and store it in attrs[num]. Returns 0 on success, -ENOMEM otherwise.
 */
static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	sysfs_attr_init(&pa->attr.attr);
	pa->id = PAI_CRYPTO_BASE + num;	/* Event number shown in sysfs */
	pa->attr.attr.name = paicrypt_ctrnames[num];
	pa->attr.attr.mode = 0444;	/* Read-only attribute */
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	attrs[num] = &pa->attr.attr;
	return 0;
}
  577. /* Create PMU sysfs event attributes on the fly. */
  578. static int __init attr_event_init(void)
  579. {
  580. struct attribute **attrs;
  581. int ret, i;
  582. attrs = kmalloc_array(ARRAY_SIZE(paicrypt_ctrnames) + 1, sizeof(*attrs),
  583. GFP_KERNEL);
  584. if (!attrs)
  585. return -ENOMEM;
  586. for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
  587. ret = attr_event_init_one(attrs, i);
  588. if (ret) {
  589. attr_event_free(attrs, i - 1);
  590. return ret;
  591. }
  592. }
  593. attrs[i] = NULL;
  594. paicrypt_events_group.attrs = attrs;
  595. return 0;
  596. }
/* Device driver initialization: detect the Processor Activity
 * Instrumentation facility, export the event attributes, set up the
 * s390dbf debug area and register the PMU. Returns 0 when the facility
 * is absent (nothing to do) or on success, a negative error otherwise.
 */
static int __init paicrypt_init(void)
{
	struct qpaci_info_block ib;
	int rc;

	/* Facility 196 indicates processor activity instrumentation */
	if (!test_facility(196))
		return 0;

	qpaci(&ib);
	paicrypt_cnt = ib.num_cc;	/* Highest supported counter number */
	if (paicrypt_cnt == 0)
		return 0;		/* No mapped counters available */
	/* Clamp to the size of the mapped counter page */
	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR)
		paicrypt_cnt = PAI_CRYPTO_MAXCTR - 1;

	rc = attr_event_init();		/* Export known PAI crypto events */
	if (rc) {
		pr_err("Creation of PMU pai_crypto /sysfs failed\n");
		return rc;
	}

	/* Setup s390dbf facility */
	cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
	if (!cfm_dbg) {
		pr_err("Registration of s390dbf pai_crypto failed\n");
		return -ENOMEM;
	}
	debug_register_view(cfm_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
	if (rc) {
		pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
		       rc);
		/* Undo the debug area registration on failure */
		debug_unregister_view(cfm_dbg, &debug_sprintf_view);
		debug_unregister(cfm_dbg);
		return rc;
	}
	return 0;
}
device_initcall(paicrypt_init);