pmu-emul.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <[email protected]>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

#define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)

DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);

static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
        unsigned int pmuver;

        pmuver = kvm->arch.arm_pmu->pmuver;

        switch (pmuver) {
        case ID_AA64DFR0_EL1_PMUVer_IMP:
                return GENMASK(9, 0);
        case ID_AA64DFR0_EL1_PMUVer_V3P1:
        case ID_AA64DFR0_EL1_PMUVer_V3P4:
        case ID_AA64DFR0_EL1_PMUVer_V3P5:
        case ID_AA64DFR0_EL1_PMUVer_V3P7:
                return GENMASK(15, 0);
        default:	/* Shouldn't be here, just for sanity */
                WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
                return 0;
        }
}

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (select_idx == ARMV8_PMU_CYCLE_IDX);
}

static bool kvm_pmu_idx_has_64bit_overflow(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (select_idx == ARMV8_PMU_CYCLE_IDX &&
                __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}
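
/*
 * Event counters are paired (even, odd): an even counter can "chain" into
 * its odd neighbour, which then counts CHAIN events when the even counter
 * overflows. The cycle counter, and any counter that already overflows at
 * 64 bits, never chains.
 */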
static bool kvm_pmu_counter_can_chain(struct kvm_vcpu *vcpu, u64 idx)
{
        return (!(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX &&
                !kvm_pmu_idx_has_64bit_overflow(vcpu, idx));
}
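
/*
 * Recover the vcpu that owns a counter: step back from pmc[idx] to pmc[0],
 * then walk the enclosing kvm_pmu and kvm_vcpu_arch structures with
 * container_of() until the vcpu itself is reached.
 */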
static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu;
        struct kvm_vcpu_arch *vcpu_arch;

        pmc -= pmc->idx;
        pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
        vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
        return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 counter, reg, enabled, running;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];

        if (!kvm_vcpu_has_pmu(vcpu))
                return 0;

        reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
        counter = __vcpu_sys_reg(vcpu, reg);

        /*
         * The real counter value is equal to the value of the counter
         * register plus the value accumulated by the perf event.
         */
        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event, &enabled,
                                                 &running);

        if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
                counter = lower_32_bits(counter);

        return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
        u64 reg;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
        __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

        /* Recreate the perf event to reflect the updated sample_period */
        kvm_pmu_create_perf_event(vcpu, select_idx);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                perf_event_disable(pmc->perf_event);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
        u64 counter, reg, val;

        if (!pmc->perf_event)
                return;

        counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);

        if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
                reg = PMCCNTR_EL0;
                val = counter;
        } else {
                reg = PMEVCNTR0_EL0 + pmc->idx;
                val = lower_32_bits(counter);
        }

        __vcpu_sys_reg(vcpu, reg) = val;

        kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
        unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        int i;

        for_each_set_bit(i, &mask, 32)
                kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                kvm_pmu_release_perf_event(&pmu->pmc[i]);

        irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

        val &= ARMV8_PMU_PMCR_N_MASK;
        if (val == 0)
                return BIT(ARMV8_PMU_CYCLE_IDX);
        else
                return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}
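
/*
 * Example: with PMCR_EL0.N == 6 event counters, the mask computed above is
 * GENMASK(5, 0) | BIT(31) == 0x8000003f, i.e. event counters 0-5 plus the
 * cycle counter (index 31).
 */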

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                if (!pmc->perf_event) {
                        kvm_pmu_create_perf_event(vcpu, i);
                } else {
                        perf_event_enable(pmc->perf_event);
                        if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
                                kvm_debug("fail to enable perf event\n");
                }
        }
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!kvm_vcpu_has_pmu(vcpu) || !val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                if (pmc->perf_event)
                        perf_event_disable(pmc->perf_event);
        }
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
        u64 reg = 0;

        if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
                reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
        }

        return reg;
}
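
/*
 * Recompute the overflow interrupt line level and, when an in-kernel
 * irqchip is used, propagate any change to the vgic.
 */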
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool overflow;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        overflow = !!kvm_pmu_overflow_status(vcpu);
        if (pmu->irq_level == overflow)
                return;

        pmu->irq_level = overflow;

        if (likely(irqchip_in_kernel(vcpu->kvm))) {
                int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                              pmu->irq_num, overflow, pmu);
                WARN_ON(ret);
        }
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the PMU bit of the device_irq_level bitmap for user space */
        regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
        if (vcpu->arch.pmu.irq_level)
                regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/*
 * When the perf interrupt is an NMI, we cannot safely notify the vcpu
 * corresponding to the event.
 * This is why we need a callback to do it once outside of the NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
        struct kvm_vcpu *vcpu;
        struct kvm_pmu *pmu;

        pmu = container_of(work, struct kvm_pmu, overflow_work);
        vcpu = kvm_pmc_to_vcpu(pmu->pmc);

        kvm_vcpu_kick(vcpu);
}

/*
 * Perform an increment on any of the counters described in @mask,
 * generating the overflow if required, and propagate it as a chained
 * event if possible.
 */
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
                                      unsigned long mask, u32 event)
{
        int i;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
                return;

        /* Weed out disabled counters */
        mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

        for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
                u64 type, reg;

                /* Filter on event type */
                type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
                type &= kvm_pmu_event_mask(vcpu->kvm);
                if (type != event)
                        continue;

                /* Increment this counter */
                reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
                reg = lower_32_bits(reg);
                __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;

                if (reg) /* No overflow? move on */
                        continue;

                /* Mark overflow */
                __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);

                if (kvm_pmu_counter_can_chain(vcpu, i))
                        kvm_pmu_counter_increment(vcpu, BIT(i + 1),
                                                  ARMV8_PMUV3_PERFCTR_CHAIN);
        }
}

/* Compute the sample period for a given counter value */
static u64 compute_period(struct kvm_vcpu *vcpu, u64 select_idx, u64 counter)
{
        u64 val;

        if (kvm_pmu_idx_is_64bit(vcpu, select_idx)) {
                if (!kvm_pmu_idx_has_64bit_overflow(vcpu, select_idx))
                        val = -(counter & GENMASK(31, 0));
                else
                        val = (-counter) & GENMASK(63, 0);
        } else {
                val = (-counter) & GENMASK(31, 0);
        }

        return val;
}
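
/*
 * Example: a 32-bit counter currently holding 0xfffffff0 overflows after
 * 0x10 more increments, so the sample period programmed into perf is
 * (-0xfffffff0) & GENMASK(31, 0) == 0x10.
 */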

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                                  struct perf_sample_data *data,
                                  struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;
        u64 period;

        cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

        /*
         * Reset the sample period to the architectural limit,
         * i.e. the point where the counter overflows.
         */
        period = compute_period(vcpu, idx, local64_read(&perf_event->count));

        local64_set(&perf_event->hw.period_left, 0);
        perf_event->attr.sample_period = period;
        perf_event->hw.sample_period = period;

        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

        if (kvm_pmu_counter_can_chain(vcpu, idx))
                kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
                                          ARMV8_PMUV3_PERFCTR_CHAIN);

        if (kvm_pmu_overflow_status(vcpu)) {
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

                if (!in_nmi())
                        kvm_vcpu_kick(vcpu);
                else
                        irq_work_queue(&vcpu->arch.pmu.overflow_work);
        }

        cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
        kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
        int i;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        /* The reset bits don't indicate any state, and shouldn't be saved. */
        __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);

        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter_mask(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
        } else {
                kvm_pmu_disable_counter_mask(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
        }

        if (val & ARMV8_PMU_PMCR_C)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

        if (val & ARMV8_PMU_PMCR_P) {
                unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);

                mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
                for_each_set_bit(i, &mask, 32)
                        kvm_pmu_set_counter_value(vcpu, i, 0);
        }

        kvm_vcpu_pmu_restore_guest(vcpu);
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
               (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];
        struct perf_event *event;
        struct perf_event_attr attr;
        u64 eventsel, counter, reg, data;

        reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
        data = __vcpu_sys_reg(vcpu, reg);

        kvm_pmu_stop_counter(vcpu, pmc);
        if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
        else
                eventsel = data & kvm_pmu_event_mask(vcpu->kvm);

        /*
         * Neither SW increment nor chained events need to be backed
         * by a perf event.
         */
        if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
            eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
                return;

        /*
         * If we have a filter in place and the event isn't allowed, do
         * not install a perf event either.
         */
        if (vcpu->kvm->arch.pmu_filter &&
            !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
                return;

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.type = arm_pmu->pmu.type;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
        attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
        attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
        attr.exclude_hv = 1; /* Don't count EL2 events */
        attr.exclude_host = 1; /* Don't count host events */
        attr.config = eventsel;

        counter = kvm_pmu_get_counter_value(vcpu, select_idx);

        /*
         * If counting with a 64bit counter, advertise it to the perf
         * code, carefully dealing with the initial sample period
         * which also depends on the overflow.
         */
        if (kvm_pmu_idx_is_64bit(vcpu, select_idx))
                attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;

        attr.sample_period = compute_period(vcpu, select_idx, counter);

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_pmu_perf_overflow, pmc);

        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return;
        }

        pmc->perf_event = event;
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx)
{
        u64 reg, mask;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        mask = ARMV8_PMU_EVTYPE_MASK;
        mask &= ~ARMV8_PMU_EVTYPE_EVENT;
        mask |= kvm_pmu_event_mask(vcpu->kvm);

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

        __vcpu_sys_reg(vcpu, reg) = data & mask;

        kvm_pmu_create_perf_event(vcpu, select_idx);
}

void kvm_host_pmu_init(struct arm_pmu *pmu)
{
        struct arm_pmu_entry *entry;

        if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
                return;

        mutex_lock(&arm_pmus_lock);

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                goto out_unlock;

        entry->arm_pmu = pmu;
        list_add_tail(&entry->entry, &arm_pmus);

        if (list_is_singular(&arm_pmus))
                static_branch_enable(&kvm_arm_pmu_available);

out_unlock:
        mutex_unlock(&arm_pmus_lock);
}

static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
        struct perf_event_attr attr = { };
        struct perf_event *event;
        struct arm_pmu *pmu = NULL;

        /*
         * Create a dummy event that only counts user cycles. As we'll never
         * leave this function with the event being live, it will never
         * count anything. But it allows us to probe some of the PMU
         * details. Yes, this is terrible.
         */
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = 0;
        attr.exclude_user = 0;
        attr.exclude_kernel = 1;
        attr.exclude_hv = 1;
        attr.exclude_host = 1;
        attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
        attr.sample_period = GENMASK(63, 0);

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_pmu_perf_overflow, &attr);

        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return NULL;
        }

        if (event->pmu) {
                pmu = to_arm_pmu(event->pmu);
                if (pmu->pmuver == 0 ||
                    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
                        pmu = NULL;
        }

        perf_event_disable(event);
        perf_event_release_kernel(event);

        return pmu;
}
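
/*
 * Build the guest view of PMCEID0_EL0/PMCEID1_EL0 from the host registers,
 * then clear the bits for any events the userspace PMU event filter does
 * not allow. The low 32 bits describe the base event range and the high
 * 32 bits the corresponding 0x4000-based extended range.
 */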
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
        unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
        u64 val, mask = 0;
        int base, i, nr_events;

        if (!kvm_vcpu_has_pmu(vcpu))
                return 0;

        if (!pmceid1) {
                val = read_sysreg(pmceid0_el0);
                base = 0;
        } else {
                val = read_sysreg(pmceid1_el0);
                /*
                 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
                 * as RAZ
                 */
                if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
                        val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
                base = 32;
        }

        if (!bmap)
                return val;

        nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

        for (i = 0; i < 32; i += 8) {
                u64 byte;

                byte = bitmap_get_value8(bmap, base + i);
                mask |= byte << i;

                if (nr_events >= (0x4000 + base + 32)) {
                        byte = bitmap_get_value8(bmap, 0x4000 + base + i);
                        mask |= byte << (32 + i);
                }
        }

        return val & mask;
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
        if (!kvm_vcpu_has_pmu(vcpu))
                return 0;

        if (!vcpu->arch.pmu.created)
                return -EINVAL;

        /*
         * A valid interrupt configuration for the PMU is either to have a
         * properly configured interrupt number and using an in-kernel
         * irqchip, or to not have an in-kernel GIC and not set an IRQ.
         */
        if (irqchip_in_kernel(vcpu->kvm)) {
                int irq = vcpu->arch.pmu.irq_num;
                /*
                 * If we are using an in-kernel vgic, at this point we know
                 * the vgic will be initialized, so we can check the PMU irq
                 * number against the dimensions of the vgic and make sure
                 * it's valid.
                 */
                if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
                        return -EINVAL;
        } else if (kvm_arm_pmu_irq_initialized(vcpu)) {
                return -EINVAL;
        }

        /* One-off reload of the PMU on first run */
        kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

        return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm)) {
                int ret;

                /*
                 * If using the PMU with an in-kernel virtual GIC
                 * implementation, we require the GIC to be already
                 * initialized when initializing the PMU.
                 */
                if (!vgic_initialized(vcpu->kvm))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
                                         &vcpu->arch.pmu);
                if (ret)
                        return ret;
        }

        init_irq_work(&vcpu->arch.pmu.overflow_work,
                      kvm_pmu_perf_overflow_notify_vcpu);

        vcpu->arch.pmu.created = true;
        return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        continue;

                if (irq_is_ppi(irq)) {
                        if (vcpu->arch.pmu.irq_num != irq)
                                return false;
                } else {
                        if (vcpu->arch.pmu.irq_num == irq)
                                return false;
                }
        }

        return true;
}

static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
{
        struct kvm *kvm = vcpu->kvm;
        struct arm_pmu_entry *entry;
        struct arm_pmu *arm_pmu;
        int ret = -ENXIO;

        lockdep_assert_held(&kvm->arch.config_lock);
        mutex_lock(&arm_pmus_lock);

        list_for_each_entry(entry, &arm_pmus, entry) {
                arm_pmu = entry->arm_pmu;
                if (arm_pmu->pmu.type == pmu_id) {
                        if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) ||
                            (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
                                ret = -EBUSY;
                                break;
                        }

                        kvm->arch.arm_pmu = arm_pmu;
                        cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
                        ret = 0;
                        break;
                }
        }

        mutex_unlock(&arm_pmus_lock);
        return ret;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        struct kvm *kvm = vcpu->kvm;

        lockdep_assert_held(&kvm->arch.config_lock);

        if (!kvm_vcpu_has_pmu(vcpu))
                return -ENODEV;

        if (vcpu->arch.pmu.created)
                return -EBUSY;

        if (!kvm->arch.arm_pmu) {
                /* No PMU set, get the default one */
                kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
                if (!kvm->arch.arm_pmu)
                        return -ENODEV;
        }

        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(kvm))
                        return -EINVAL;

                if (get_user(irq, uaddr))
                        return -EFAULT;

                /* The PMU overflow interrupt can be a PPI or a valid SPI. */
                if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
                        return -EINVAL;

                if (!pmu_irq_is_valid(kvm, irq))
                        return -EINVAL;

                if (kvm_arm_pmu_irq_initialized(vcpu))
                        return -EBUSY;

                kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
                vcpu->arch.pmu.irq_num = irq;
                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_FILTER: {
                struct kvm_pmu_event_filter __user *uaddr;
                struct kvm_pmu_event_filter filter;
                int nr_events;

                nr_events = kvm_pmu_event_mask(kvm) + 1;

                uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

                if (copy_from_user(&filter, uaddr, sizeof(filter)))
                        return -EFAULT;

                if (((u32)filter.base_event + filter.nevents) > nr_events ||
                    (filter.action != KVM_PMU_EVENT_ALLOW &&
                     filter.action != KVM_PMU_EVENT_DENY))
                        return -EINVAL;

                if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags))
                        return -EBUSY;

                if (!kvm->arch.pmu_filter) {
                        kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
                        if (!kvm->arch.pmu_filter)
                                return -ENOMEM;

                        /*
                         * The default depends on the first applied filter.
                         * If it allows events, the default is to deny.
                         * Conversely, if the first filter denies a set of
                         * events, the default is to allow.
                         */
                        if (filter.action == KVM_PMU_EVENT_ALLOW)
                                bitmap_zero(kvm->arch.pmu_filter, nr_events);
                        else
                                bitmap_fill(kvm->arch.pmu_filter, nr_events);
                }

                if (filter.action == KVM_PMU_EVENT_ALLOW)
                        bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
                else
                        bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int pmu_id;

                if (get_user(pmu_id, uaddr))
                        return -EFAULT;

                return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
        }
        case KVM_ARM_VCPU_PMU_V3_INIT:
                return kvm_arm_pmu_v3_init(vcpu);
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (!kvm_vcpu_has_pmu(vcpu))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                irq = vcpu->arch.pmu.irq_num;
                return put_user(irq, uaddr);
        }
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ:
        case KVM_ARM_VCPU_PMU_V3_INIT:
        case KVM_ARM_VCPU_PMU_V3_FILTER:
        case KVM_ARM_VCPU_PMU_V3_SET_PMU:
                if (kvm_vcpu_has_pmu(vcpu))
                        return 0;
        }

        return -ENXIO;
}
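
/*
 * A minimal userspace sketch of driving the attributes handled above,
 * assuming the standard KVM vcpu device-attr path (KVM_SET_DEVICE_ATTR on
 * the vcpu fd with group KVM_ARM_VCPU_PMU_V3_CTRL) and a vcpu created with
 * the PMUv3 feature. The fd and IRQ number below are placeholders; check
 * the KVM API documentation and uapi headers before relying on the details.
 *
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *
 *	static int configure_vcpu_pmu(int vcpu_fd)	// vcpu_fd: placeholder
 *	{
 *		int irq = 23;				// example PPI number
 *		struct kvm_device_attr attr = {
 *			.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *			.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
 *			.addr	= (__u64)(unsigned long)&irq,
 *		};
 *
 *		// Set the overflow interrupt (in-kernel irqchip required),
 *		// handled by the KVM_ARM_VCPU_PMU_V3_IRQ case above.
 *		if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
 *			return -1;
 *
 *		// Finalise the PMU; handled by kvm_arm_pmu_v3_init().
 *		attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
 *		attr.addr = 0;
 *		return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 *	}
 */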