perf_pai_ext.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Extension
 * Facility
 *
 * Copyright IBM Corp. 2022
 * Author(s): Thomas Richter <[email protected]>
 */
#define	KMSG_COMPONENT	"pai_ext"
#define	pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/io.h>

#include <asm/cpu_mcf.h>
#include <asm/ctl_reg.h>
#include <asm/pai.h>
#include <asm/debug.h>

#define	PAIE1_CB_SZ		0x200	/* Size of PAIE1 control block */
#define	PAIE1_CTRBLOCK_SZ	0x400	/* Size of PAIE1 counter blocks */
static debug_info_t *paiext_dbg;
static unsigned int paiext_cnt;	/* Extracted with QPACI instruction */

enum paiext_mode {
	PAI_MODE_NONE,
	PAI_MODE_SAMPLING,
	PAI_MODE_COUNTER,
};

struct pai_userdata {
	u16 num;
	u64 value;
} __packed;

/* Create the PAI extension 1 control block area.
 * The PAI extension control block 1 is pointed to by lowcore
 * address 0x1508 for each CPU. This control block is 512 bytes in size
 * and requires a 512 byte boundary alignment.
 */
struct paiext_cb {		/* PAI extension 1 control block */
	u64 header;		/* Not used */
	u64 reserved1;
	u64 acc;		/* Addr to analytics counter control block */
	u8 reserved2[488];
} __packed;

struct paiext_map {
	unsigned long *area;		/* Area for CPU to store counters */
	struct pai_userdata *save;	/* Area to store non-zero counters */
	enum paiext_mode mode;		/* Type of event */
	unsigned int active_events;	/* # of PAI Extension users */
	unsigned int refcnt;
	struct perf_event *event;	/* Perf event for sampling */
	struct paiext_cb *paiext_cb;	/* PAI extension control block area */
};

struct paiext_mapptr {
	struct paiext_map *mapptr;
};

static struct paiext_root {		/* Anchor to per CPU data */
	int refcnt;			/* Overall active events */
	struct paiext_mapptr __percpu *mapptr;
} paiext_root;
/* Free per CPU data when the last event is removed. */
static void paiext_root_free(void)
{
	if (!--paiext_root.refcnt) {
		free_percpu(paiext_root.mapptr);
		paiext_root.mapptr = NULL;
	}
}
/* On initialization of the first event also allocate per CPU data
 * dynamically. Start with an array of pointers, the array size is the
 * maximum number of CPUs possible, which might be larger than the number
 * of CPUs currently online.
 */
static int paiext_root_alloc(void)
{
	if (++paiext_root.refcnt == 1) {
		/* The memory is already zeroed. */
		paiext_root.mapptr = alloc_percpu(struct paiext_mapptr);
		if (!paiext_root.mapptr) {
			/* Returning without refcnt adjustment is ok. The
			 * error code is handled by paiext_alloc() which
			 * decrements refcnt when an event can not be
			 * created.
			 */
			return -ENOMEM;
		}
	}
	return 0;
}
/* Protects against concurrent increments of the sampler and counter members
 * and prohibits concurrent execution of counting and sampling events.
 * Ensures that the analytics counter block is deallocated only when both
 * sampling and counting on that CPU have dropped to zero.
 * For details see paiext_alloc().
 */
static DEFINE_MUTEX(paiext_reserve_mutex);
/* Free all memory allocated for event counting/sampling setup */
static void paiext_free(struct paiext_mapptr *mp)
{
	kfree(mp->mapptr->area);
	kfree(mp->mapptr->paiext_cb);
	kvfree(mp->mapptr->save);
	kfree(mp->mapptr);
	mp->mapptr = NULL;
}

/* Release the PMU if event is the last perf event */
static void paiext_event_destroy(struct perf_event *event)
{
	struct paiext_mapptr *mp = per_cpu_ptr(paiext_root.mapptr, event->cpu);
	struct paiext_map *cpump = mp->mapptr;

	mutex_lock(&paiext_reserve_mutex);
	cpump->event = NULL;
	if (!--cpump->refcnt)		/* Last reference gone */
		paiext_free(mp);
	paiext_root_free();
	mutex_unlock(&paiext_reserve_mutex);
	debug_sprintf_event(paiext_dbg, 4, "%s cpu %d mapptr %p\n", __func__,
			    event->cpu, mp->mapptr);
}
/* Used to avoid races in checking concurrent access of counting and
 * sampling for pai_extension events.
 *
 * Only one instance of event pai_ext/NNPA_ALL/ for sampling is
 * allowed and when this event is running, no counting event is allowed.
 * Several counting events are allowed in parallel, but no sampling event
 * is allowed while one (or more) counting events are running.
 *
 * This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other callback will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
{
	struct paiext_mapptr *mp;
	struct paiext_map *cpump;
	int rc;

	mutex_lock(&paiext_reserve_mutex);
	rc = paiext_root_alloc();
	if (rc)
		goto unlock;

	mp = per_cpu_ptr(paiext_root.mapptr, event->cpu);
	cpump = mp->mapptr;
	if (!cpump) {			/* Paiext_map allocated? */
		rc = -ENOMEM;
		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
		if (!cpump)
			goto unlock;

		/* Allocate memory for counter area and counter extraction.
		 * These are
		 * - a 512 byte block and requires 512 byte boundary alignment.
		 * - a 1KB block and requires 1KB boundary alignment.
		 * Only the first counting event has to allocate the area.
		 *
		 * Note: This works with commit 59bb47985c1d by default.
		 * Backporting this to kernels without this commit might
		 * need adjustment.
		 */
		mp->mapptr = cpump;
		cpump->area = kzalloc(PAIE1_CTRBLOCK_SZ, GFP_KERNEL);
		cpump->paiext_cb = kzalloc(PAIE1_CB_SZ, GFP_KERNEL);
		cpump->save = kvmalloc_array(paiext_cnt + 1,
					     sizeof(struct pai_userdata),
					     GFP_KERNEL);
		if (!cpump->save || !cpump->area || !cpump->paiext_cb) {
			paiext_free(mp);
			goto unlock;
		}
		cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
					       : PAI_MODE_COUNTER;
	} else {
		/* Multiple invocation, check what is active.
		 * Supported are multiple counter events or only one sampling
		 * event concurrently at any one time.
		 */
		if (cpump->mode == PAI_MODE_SAMPLING ||
		    (cpump->mode == PAI_MODE_COUNTER && a->sample_period)) {
			rc = -EBUSY;
			goto unlock;
		}
	}

	rc = 0;
	cpump->event = event;
	++cpump->refcnt;

unlock:
	if (rc) {
		/* Error in allocation of event, decrement anchor. Since
		 * the event is not created, its destroy() function is never
		 * invoked. Adjust the reference counter for the anchor.
		 */
		paiext_root_free();
	}
	mutex_unlock(&paiext_reserve_mutex);
	/* If rc is non-zero, no increment of counter/sampler was done. */
	return rc;
}
/* The PAI extension 1 control block supports up to 128 entries. Return
 * the index within PAIE1_CB given the event number. Also validate event
 * number.
 */
static int paiext_event_valid(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (cfg >= PAI_NNPA_BASE && cfg <= PAI_NNPA_BASE + paiext_cnt) {
		/* Offset NNPA in paiext_cb */
		event->hw.config_base = offsetof(struct paiext_cb, acc);
		return 0;
	}
	return -EINVAL;
}
/* Might be called on different CPU than the one the event is intended for. */
static int paiext_event_init(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	int rc;

	/* PMU pai_ext registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
		return -ENOENT;
	/* PAI extension event must be valid and in supported range */
	rc = paiext_event_valid(event);
	if (rc)
		return rc;
	/* Allow only CPU wide operation, no process context for now. */
	if (event->hw.target || event->cpu == -1)
		return -ENOENT;
	/* Allow only event NNPA_ALL for sampling. */
	if (a->sample_period && a->config != PAI_NNPA_BASE)
		return -EINVAL;
	/* Prohibit exclude_user event selection */
	if (a->exclude_user)
		return -EINVAL;

	rc = paiext_alloc(a, event);
	if (rc)
		return rc;
	event->hw.last_tag = 0;
	event->destroy = paiext_event_destroy;

	if (a->sample_period) {
		a->sample_period = 1;
		a->freq = 0;
		/* Register for paiext_sched_task() to be called */
		event->attach_state |= PERF_ATTACH_SCHED_CB;
		/* Add raw data which are the memory mapped counters */
		a->sample_type |= PERF_SAMPLE_RAW;
		/* Turn off inheritance */
		a->inherit = 0;
	}

	return 0;
}
static u64 paiext_getctr(struct paiext_map *cpump, int nr)
{
	return cpump->area[nr];
}

/* Read the counter values. Return value from location in buffer. For event
 * NNPA_ALL sum up all events.
 */
static u64 paiext_getdata(struct perf_event *event)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	u64 sum = 0;
	int i;

	if (event->attr.config != PAI_NNPA_BASE)
		return paiext_getctr(cpump, event->attr.config - PAI_NNPA_BASE);

	for (i = 1; i <= paiext_cnt; i++)
		sum += paiext_getctr(cpump, i);

	return sum;
}

static u64 paiext_getall(struct perf_event *event)
{
	return paiext_getdata(event);
}

static void paiext_read(struct perf_event *event)
{
	u64 prev, new, delta;

	prev = local64_read(&event->hw.prev_count);
	new = paiext_getall(event);
	local64_set(&event->hw.prev_count, new);
	delta = new - prev;
	local64_add(delta, &event->count);
}

static void paiext_start(struct perf_event *event, int flags)
{
	u64 sum;

	if (event->hw.last_tag)
		return;
	event->hw.last_tag = 1;
	sum = paiext_getall(event);		/* Get current value */
	local64_set(&event->hw.prev_count, sum);
	local64_set(&event->count, 0);
}
static int paiext_add(struct perf_event *event, int flags)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	struct paiext_cb *pcb = cpump->paiext_cb;

	if (++cpump->active_events == 1) {
		S390_lowcore.aicd = virt_to_phys(cpump->paiext_cb);
		pcb->acc = virt_to_phys(cpump->area) | 0x1;
		/* Enable CPU instruction lookup for PAIE1 control block */
		__ctl_set_bit(0, 49);
		debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
				    __func__, S390_lowcore.aicd, pcb->acc);
	}
	if (flags & PERF_EF_START && !event->attr.sample_period) {
		/* Only counting needs initial counter value */
		paiext_start(event, PERF_EF_RELOAD);
	}
	event->hw.state = 0;
	if (event->attr.sample_period) {
		cpump->event = event;
		perf_sched_cb_inc(event->pmu);
	}
	return 0;
}

static void paiext_stop(struct perf_event *event, int flags)
{
	paiext_read(event);
	event->hw.state = PERF_HES_STOPPED;
}

static void paiext_del(struct perf_event *event, int flags)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	struct paiext_cb *pcb = cpump->paiext_cb;

	if (event->attr.sample_period)
		perf_sched_cb_dec(event->pmu);
	if (!event->attr.sample_period) {
		/* Only counting needs to read counter */
		paiext_stop(event, PERF_EF_UPDATE);
	}
	if (--cpump->active_events == 0) {
		/* Disable CPU instruction lookup for PAIE1 control block */
		__ctl_clear_bit(0, 49);
		pcb->acc = 0;
		S390_lowcore.aicd = 0;
		debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
				    __func__, S390_lowcore.aicd, pcb->acc);
	}
}
/* Create raw data and save it in buffer. Returns number of bytes copied.
 * Saves only positive counter entries of the form
 * 2 bytes: Number of counter
 * 8 bytes: Value of counter
 */
static size_t paiext_copy(struct paiext_map *cpump)
{
	struct pai_userdata *userdata = cpump->save;
	int i, outidx = 0;

	for (i = 1; i <= paiext_cnt; i++) {
		u64 val = paiext_getctr(cpump, i);

		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
			outidx++;
		}
	}
	return outidx * sizeof(*userdata);
}
/* Write sample when one or more counter values are nonzero.
 *
 * Note: The functions paiext_sched_task() and paiext_push_sample() are not
 * invoked after function paiext_del() has been called because of function
 * perf_sched_cb_dec().
 * The functions paiext_sched_task() and paiext_push_sample() are only
 * called when sampling is active. Function perf_sched_cb_inc()
 * has been invoked to install function paiext_sched_task() as callback
 * to run at context switch time (see paiext_add()).
 *
 * This causes function perf_event_context_sched_out() and
 * perf_event_context_sched_in() to check whether the PMU has installed a
 * sched_task() callback. That callback is not active after paiext_del()
 * returns and has deleted the event on that CPU.
 */
static int paiext_push_sample(void)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	struct perf_event *event = cpump->event;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	size_t rawsize;
	int overflow;

	rawsize = paiext_copy(cpump);
	if (!rawsize)			/* No incremented counters */
		return 0;

	/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));
	memset(&data, 0, sizeof(data));
	perf_sample_data_init(&data, 0, event->hw.last_period);
	if (event->attr.sample_type & PERF_SAMPLE_TID) {
		data.tid_entry.pid = task_tgid_nr(current);
		data.tid_entry.tid = task_pid_nr(current);
	}
	if (event->attr.sample_type & PERF_SAMPLE_TIME)
		data.time = event->clock();
	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data.id = event->id;
	if (event->attr.sample_type & PERF_SAMPLE_CPU)
		data.cpu_entry.cpu = smp_processor_id();
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = rawsize;
		raw.frag.data = cpump->save;
		raw.size = raw.frag.size;
		data.raw = &raw;
		data.sample_flags |= PERF_SAMPLE_RAW;
	}

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Clear counter area after read */
	memset(cpump->area, 0, PAIE1_CTRBLOCK_SZ);
	return overflow;
}
/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event NNPA_ALL is allowed.
 */
static void paiext_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * results on schedule_out and if page was dirty, clear values.
	 */
	if (!sched_in)
		paiext_push_sample();
}
/* Attribute definitions for the PAI extension 1 interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported, there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1800 + offset in mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0x1000 to 0x103e are used for PAI cryptography
 * counters.
 * Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paiext_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paiext_events_group = {
	.name = "events",
	.attrs = NULL,			/* Filled in attr_event_init() */
};

static struct attribute_group paiext_format_group = {
	.name = "format",
	.attrs = paiext_format_attr,
};

static const struct attribute_group *paiext_attr_groups[] = {
	&paiext_events_group,
	&paiext_format_group,
	NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paiext = {
	.task_ctx_nr = perf_invalid_context,
	.event_init = paiext_event_init,
	.add = paiext_add,
	.del = paiext_del,
	.start = paiext_start,
	.stop = paiext_stop,
	.read = paiext_read,
	.sched_task = paiext_sched_task,
	.attr_groups = paiext_attr_groups,
};
/* List of symbolic PAI extension 1 NNPA counter names. */
static const char * const paiext_ctrnames[] = {
	[0] = "NNPA_ALL",
	[1] = "NNPA_ADD",
	[2] = "NNPA_SUB",
	[3] = "NNPA_MUL",
	[4] = "NNPA_DIV",
	[5] = "NNPA_MIN",
	[6] = "NNPA_MAX",
	[7] = "NNPA_LOG",
	[8] = "NNPA_EXP",
	[9] = "NNPA_IBM_RESERVED_9",
	[10] = "NNPA_RELU",
	[11] = "NNPA_TANH",
	[12] = "NNPA_SIGMOID",
	[13] = "NNPA_SOFTMAX",
	[14] = "NNPA_BATCHNORM",
	[15] = "NNPA_MAXPOOL2D",
	[16] = "NNPA_AVGPOOL2D",
	[17] = "NNPA_LSTMACT",
	[18] = "NNPA_GRUACT",
	[19] = "NNPA_CONVOLUTION",
	[20] = "NNPA_MATMUL_OP",
	[21] = "NNPA_MATMUL_OP_BCAST23",
	[22] = "NNPA_SMALLBATCH",
	[23] = "NNPA_LARGEDIM",
	[24] = "NNPA_SMALLTENSOR",
	[25] = "NNPA_1MFRAME",
	[26] = "NNPA_2GFRAME",
	[27] = "NNPA_ACCESSEXCEPT",
};
static void __init attr_event_free(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;
	struct device_attribute *dap;
	int i;

	for (i = 0; i < num; i++) {
		dap = container_of(attrs[i], struct device_attribute, attr);
		pa = container_of(dap, struct perf_pmu_events_attr, attr);
		kfree(pa);
	}
	kfree(attrs);
}

static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	sysfs_attr_init(&pa->attr.attr);
	pa->id = PAI_NNPA_BASE + num;
	pa->attr.attr.name = paiext_ctrnames[num];
	pa->attr.attr.mode = 0444;
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	attrs[num] = &pa->attr.attr;
	return 0;
}
/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
	struct attribute **attrs;
	int ret, i;

	attrs = kmalloc_array(ARRAY_SIZE(paiext_ctrnames) + 1, sizeof(*attrs),
			      GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	for (i = 0; i < ARRAY_SIZE(paiext_ctrnames); i++) {
		ret = attr_event_init_one(attrs, i);
		if (ret) {
			/* Free all attributes allocated so far */
			attr_event_free(attrs, i);
			return ret;
		}
	}
	attrs[i] = NULL;
	paiext_events_group.attrs = attrs;
	return 0;
}
static int __init paiext_init(void)
{
	struct qpaci_info_block ib;
	int rc = -ENOMEM;

	if (!test_facility(197))
		return 0;

	qpaci(&ib);
	paiext_cnt = ib.num_nnpa;
	if (paiext_cnt >= PAI_NNPA_MAXCTR)
		paiext_cnt = PAI_NNPA_MAXCTR;
	if (!paiext_cnt)
		return 0;

	rc = attr_event_init();
	if (rc) {
		pr_err("Creation of PMU " KMSG_COMPONENT " /sysfs failed\n");
		return rc;
	}

	/* Setup s390dbf facility */
	paiext_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
	if (!paiext_dbg) {
		pr_err("Registration of s390dbf " KMSG_COMPONENT " failed\n");
		rc = -ENOMEM;
		goto out_init;
	}
	debug_register_view(paiext_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&paiext, KMSG_COMPONENT, -1);
	if (rc) {
		pr_err("Registration of " KMSG_COMPONENT " PMU failed with "
		       "rc=%i\n", rc);
		goto out_pmu;
	}

	return 0;

out_pmu:
	debug_unregister_view(paiext_dbg, &debug_sprintf_view);
	debug_unregister(paiext_dbg);
out_init:
	attr_event_free(paiext_events_group.attrs,
			ARRAY_SIZE(paiext_ctrnames) + 1);
	return rc;
}

device_initcall(paiext_init);
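
For reference, below is a minimal user-space sketch (not part of the kernel source above) of how a counting event for this PMU could be opened with perf_event_open(2). It assumes the PMU shows up under /sys/bus/event_source/devices/pai_ext (the name passed to perf_pmu_register() above) and that NNPA_ALL has config value 0x1800 (PAI_NNPA_BASE, per the attribute comment); both are read or hard-coded here as assumptions, not taken from kernel headers. Matching the checks in paiext_event_init(), the event is bound to a specific CPU (cpu != -1), exclude_user stays 0, and sample_period stays 0 for counting. Running it typically requires root or a permissive perf_event_paranoid setting.

/* Hypothetical user-space example; error handling kept minimal. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned int type;
	uint64_t count;
	FILE *f;
	int fd;

	/* PMU type id as assigned when the "pai_ext" PMU was registered */
	f = fopen("/sys/bus/event_source/devices/pai_ext/type", "r");
	if (!f) {
		perror("pai_ext PMU not available");
		return 1;
	}
	if (fscanf(f, "%u", &type) != 1) {
		fclose(f);
		fprintf(stderr, "cannot parse PMU type\n");
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x1800;	/* NNPA_ALL, assuming PAI_NNPA_BASE == 0x1800 */
	/* Counting mode: sample_period stays 0; exclude_user must stay 0. */

	/* System-wide counting on CPU 0; cpu == -1 would be rejected. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(1);			/* Let NNPA work accumulate */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("NNPA_ALL: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

The same event can also be requested through the sysfs name created by attr_event_init(), e.g. perf stat -e pai_ext/NNPA_ALL/ on a CPU list, which resolves to the same config value.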