pmu_lib.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "qcom-pmu: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/perf_event.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <trace/hooks/cpuidle.h>
#include <soc/qcom/pmu_lib.h>
#include <soc/qcom/qcom_llcc_pmu.h>
#include <linux/perf/arm_pmu.h>

#define MAX_PMU_EVS QCOM_PMU_MAX_EVS
#define INVALID_ID 0xFF

static void __iomem *pmu_base;
static uint32_t phys_cpu[NR_CPUS];

#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
#include <linux/qcom_scmi_vendor.h>
#define MAX_NUM_CPUS 8
#define PMUMAP_ALGO_STR 0x504D554D4150 /* "PMUMAP" */
enum scmi_c1dcvs_protocol_cmd {
	SET_PMU_MAP = 11,
	SET_ENABLE_TRACE,
	SET_ENABLE_CACHING,
};
struct pmu_map_msg {
	uint8_t hw_cntrs[MAX_NUM_CPUS][MAX_CPUCP_EVT];
};
#endif

struct cpucp_pmu_ctrs {
	u32 evctrs[MAX_CPUCP_EVT];
	u32 valid;
};

struct event_data {
	u32 event_id;
	struct perf_event *pevent;
	int cpu;
	u64 cached_count;
	enum amu_counters amu_id;
	enum cpucp_ev_idx cid;
};

struct amu_data {
	enum amu_counters amu_id;
	u64 count;
};

struct cpu_data {
	bool is_idle;
	bool is_hp;
	bool is_pc;
	struct event_data events[MAX_PMU_EVS];
	u32 num_evs;
	atomic_t read_cnt;
	spinlock_t read_lock;
};

static DEFINE_PER_CPU(struct cpu_data *, cpu_ev_data);
static bool qcom_pmu_inited;
static bool pmu_long_counter;
static int cpuhp_state;
static struct scmi_protocol_handle *ph;
#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
static const struct qcom_scmi_vendor_ops *ops;
#else
static const struct scmi_pmu_vendor_ops *ops;
#endif
static LIST_HEAD(idle_notif_list);
static DEFINE_SPINLOCK(idle_list_lock);
static struct cpucp_hlos_map cpucp_map[MAX_CPUCP_EVT];
static struct kobject pmu_kobj;
static bool pmu_counters_enabled = true;
static unsigned int pmu_enable_trace;
static bool llcc_ignore_setup;

/*
 * is_amu_valid: Check if AMUs are supported and if the id corresponds to the
 * four supported AMU counters i.e. SYS_AMEVCNTR0_CONST_EL0,
 * SYS_AMEVCNTR0_CORE_EL0, SYS_AMEVCNTR0_INST_RET_EL0, SYS_AMEVCNTR0_MEM_STALL
 */
static inline bool is_amu_valid(enum amu_counters amu_id)
{
	return (amu_id >= SYS_AMU_CONST_CYC && amu_id < SYS_AMU_MAX &&
		IS_ENABLED(CONFIG_ARM64_AMU_EXTN));
}

/*
 * is_cid_valid: Check if events are supported and if the id corresponds to the
 * supported CPUCP events i.e. enum cpucp_ev_idx.
 */
static inline bool is_cid_valid(enum cpucp_ev_idx cid)
{
	return (cid >= CPU_CYC_EVT && cid < MAX_CPUCP_EVT);
}

/*
 * is_event_valid: Check if event has an id and a corresponding pevent or
 * valid amu id.
 */
static inline bool is_event_valid(struct event_data *ev)
{
	return (ev->event_id && (ev->pevent || is_amu_valid(ev->amu_id)));
}

/*
 * is_event_shared: Check if event is supposed to be shared with cpucp.
 */
static inline bool is_event_shared(struct event_data *ev)
{
	return (is_cid_valid(ev->cid) && cpucp_map[ev->cid].shared);
}

#define CYCLE_COUNTER_ID 0x11
static inline u64 cached_count_value(struct event_data *ev, u64 event_cached_count, bool amu)
{
	struct arm_pmu *cpu_pmu;

	/* AMU counts are used as-is; ev->pevent may be NULL for AMU events */
	if (amu)
		return event_cached_count;

	cpu_pmu = container_of(ev->pevent->pmu, struct arm_pmu, pmu);
	if (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5) {
		event_cached_count |= (pmu_long_counter ? BIT(63) : GENMASK(63, 31));
	} else {
		if (ev->event_id == CYCLE_COUNTER_ID)
			event_cached_count |= GENMASK(63, 31);
		else
			event_cached_count = ((event_cached_count & GENMASK(31, 0)) |
					      BIT(31));
	}

	return event_cached_count;
}

static struct perf_event_attr *alloc_attr(void)
{
	struct perf_event_attr *attr;

	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
	if (!attr)
		return attr;

	attr->size = sizeof(struct perf_event_attr);
	attr->pinned = 1;

	return attr;
}
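
/*
 * set_event: bind an event descriptor to @cpu. AMU-backed events only record
 * the CPU (they are read via system registers, not perf). Otherwise a pinned
 * kernel counter is created and enabled; the LLCC read event is resolved to
 * the LLCC PMU's dynamic perf type via qcom_llcc_pmu_hw_type() unless
 * "qcom,ignore-llcc-setup" was specified.
 */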

static int set_event(struct event_data *ev, int cpu,
		     struct perf_event_attr *attr)
{
	struct perf_event *pevent;
	u32 type = PERF_TYPE_RAW;
	int ret;

	/* Set the cpu and exit if amu is supported */
	if (is_amu_valid(ev->amu_id))
		goto set_cpu;
	else
		ev->amu_id = SYS_AMU_MAX;

	if (!is_cid_valid(ev->cid))
		ev->cid = MAX_CPUCP_EVT;

	if (!ev->event_id)
		return 0;

	attr->config = ev->event_id;
	/* enable 64-bit counter */
	if (pmu_long_counter)
		attr->config1 = 1;
	if (ev->event_id == QCOM_LLCC_PMU_RD_EV) {
		/*
		 * Ignore setting up the event if property set. This will avoid
		 * reading of event as well since ev->pevent will be NULL.
		 */
		if (llcc_ignore_setup)
			goto set_cpu;
		ret = qcom_llcc_pmu_hw_type(&type);
		if (ret < 0)
			return ret;
	}
	attr->type = type;
	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
	if (IS_ERR(pevent))
		return PTR_ERR(pevent);
	perf_event_enable(pevent);
	ev->pevent = pevent;

set_cpu:
	ev->cpu = cpu;
	return 0;
}

static inline void delete_event(struct event_data *event)
{
	if (event->pevent) {
		perf_event_release_kernel(event->pevent);
		event->pevent = NULL;
	}
}

static void read_amu_reg(void *amu_data)
{
	struct amu_data *data = amu_data;

	switch (data->amu_id) {
	case SYS_AMU_CONST_CYC:
		data->count = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0);
		break;
	case SYS_AMU_CORE_CYC:
		data->count = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
		break;
	case SYS_AMU_INST_RET:
		data->count = read_sysreg_s(SYS_AMEVCNTR0_INST_RET_EL0);
		break;
	case SYS_AMU_STALL_MEM:
		data->count = read_sysreg_s(SYS_AMEVCNTR0_MEM_STALL);
		break;
	default:
		pr_err("AMU counter %d not supported!\n", data->amu_id);
	}
}
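
/*
 * read_event: return the current count for an event. AMU events are read via
 * read_amu_reg(), either directly (@local) or through an IPI to the owning
 * CPU; perf events use perf_event_read_local() or perf_event_read_value().
 * The result is cached so a stale-but-valid value can be served while the
 * CPU is idle, hotplugged or power collapsed.
 */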

static inline u64 read_event(struct event_data *event, bool local)
{
	u64 enabled, running, total = 0;
	struct amu_data data;
	int ret = 0;

	if (is_amu_valid(event->amu_id)) {
		data.amu_id = event->amu_id;
		if (local) {
			read_amu_reg(&data);
		} else {
			ret = smp_call_function_single(event->cpu, read_amu_reg,
						       &data, true);
			if (ret < 0)
				return event->cached_count;
		}
		total = data.count;
	} else {
		if (!event->pevent)
			return event->cached_count;
		if (local)
			perf_event_read_local(event->pevent, &total, NULL, NULL);
		else
			total = perf_event_read_value(event->pevent, &enabled,
						      &running);
	}
	event->cached_count = total;

	return total;
}
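
/*
 * __qcom_pmu_read: look up @event_id on @cpu and return its count through
 * @pmu_data. If the CPU is hotplugged, idle or power collapsed the cached
 * count is returned instead of touching the hardware; otherwise read_cnt is
 * held across the read so teardown paths can wait for in-flight readers.
 */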

static int __qcom_pmu_read(int cpu, u32 event_id, u64 *pmu_data, bool local)
{
	struct cpu_data *cpu_data;
	struct event_data *event;
	int i;
	unsigned long flags;

	if (!qcom_pmu_inited)
		return -ENODEV;

	if (!event_id || !pmu_data || cpu >= num_possible_cpus())
		return -EINVAL;

	cpu_data = per_cpu(cpu_ev_data, cpu);
	for (i = 0; i < cpu_data->num_evs; i++) {
		event = &cpu_data->events[i];
		if (event->event_id == event_id)
			break;
	}
	if (i == cpu_data->num_evs)
		return -ENOENT;

	spin_lock_irqsave(&cpu_data->read_lock, flags);
	if (cpu_data->is_hp || cpu_data->is_idle || cpu_data->is_pc) {
		spin_unlock_irqrestore(&cpu_data->read_lock, flags);
		*pmu_data = event->cached_count;
		return 0;
	}
	atomic_inc(&cpu_data->read_cnt);
	spin_unlock_irqrestore(&cpu_data->read_lock, flags);

	*pmu_data = read_event(event, local);
	atomic_dec(&cpu_data->read_cnt);

	return 0;
}

int __qcom_pmu_read_all(int cpu, struct qcom_pmu_data *data, bool local)
{
	struct cpu_data *cpu_data;
	struct event_data *event;
	int i, cnt = 0;
	bool use_cache = false;
	unsigned long flags;

	if (!qcom_pmu_inited)
		return -ENODEV;

	if (!data || cpu >= num_possible_cpus())
		return -EINVAL;

	cpu_data = per_cpu(cpu_ev_data, cpu);
	spin_lock_irqsave(&cpu_data->read_lock, flags);
	if (cpu_data->is_hp || cpu_data->is_idle || cpu_data->is_pc)
		use_cache = true;
	else
		atomic_inc(&cpu_data->read_cnt);
	spin_unlock_irqrestore(&cpu_data->read_lock, flags);

	for (i = 0; i < cpu_data->num_evs; i++) {
		event = &cpu_data->events[i];
		if (!event->event_id)
			continue;
		data->event_ids[cnt] = event->event_id;
		if (use_cache)
			data->ev_data[cnt] = event->cached_count;
		else
			data->ev_data[cnt] = read_event(event, local);
		cnt++;
	}
	data->num_evs = cnt;
	if (!use_cache)
		atomic_dec(&cpu_data->read_cnt);

	return 0;
}

static struct event_data *get_event(u32 event_id, int cpu)
{
	struct cpu_data *cpu_data;
	struct event_data *event;
	int i;

	if (!qcom_pmu_inited)
		return ERR_PTR(-EPROBE_DEFER);

	if (!event_id || cpu >= num_possible_cpus())
		return ERR_PTR(-EINVAL);

	cpu_data = per_cpu(cpu_ev_data, cpu);
	for (i = 0; i < cpu_data->num_evs; i++) {
		event = &cpu_data->events[i];
		if (event->event_id == event_id)
			return event;
	}

	return ERR_PTR(-ENOENT);
}

int qcom_get_cpucp_id(u32 event_id, int cpu)
{
	struct event_data *event;

	event = get_event(event_id, cpu);
	if (IS_ERR(event)) {
		pr_err("error getting event %ld\n", PTR_ERR(event));
		return PTR_ERR(event);
	}

	return event->cid;
}
EXPORT_SYMBOL(qcom_get_cpucp_id);

int qcom_pmu_event_supported(u32 event_id, int cpu)
{
	struct event_data *event;

	event = get_event(event_id, cpu);

	return PTR_ERR_OR_ZERO(event);
}
EXPORT_SYMBOL(qcom_pmu_event_supported);

int qcom_pmu_read(int cpu, u32 event_id, u64 *pmu_data)
{
	return __qcom_pmu_read(cpu, event_id, pmu_data, false);
}
EXPORT_SYMBOL(qcom_pmu_read);

int qcom_pmu_read_local(u32 event_id, u64 *pmu_data)
{
	int this_cpu = smp_processor_id();

	return __qcom_pmu_read(this_cpu, event_id, pmu_data, true);
}
EXPORT_SYMBOL(qcom_pmu_read_local);

int qcom_pmu_read_all(int cpu, struct qcom_pmu_data *data)
{
	return __qcom_pmu_read_all(cpu, data, false);
}
EXPORT_SYMBOL(qcom_pmu_read_all);

int qcom_pmu_read_all_local(struct qcom_pmu_data *data)
{
	int this_cpu = smp_processor_id();

	return __qcom_pmu_read_all(this_cpu, data, true);
}
EXPORT_SYMBOL(qcom_pmu_read_all_local);

int qcom_pmu_idle_register(struct qcom_pmu_notif_node *idle_node)
{
	struct qcom_pmu_notif_node *tmp_node;

	if (!idle_node || !idle_node->idle_cb)
		return -EINVAL;

	spin_lock(&idle_list_lock);
	list_for_each_entry(tmp_node, &idle_notif_list, node)
		if (tmp_node->idle_cb == idle_node->idle_cb)
			goto out;
	list_add_tail(&idle_node->node, &idle_notif_list);
out:
	spin_unlock(&idle_list_lock);
	return 0;
}
EXPORT_SYMBOL(qcom_pmu_idle_register);

int qcom_pmu_idle_unregister(struct qcom_pmu_notif_node *idle_node)
{
	struct qcom_pmu_notif_node *tmp_node;
	int ret = -EINVAL;

	if (!idle_node || !idle_node->idle_cb)
		return ret;

	spin_lock(&idle_list_lock);
	list_for_each_entry(tmp_node, &idle_notif_list, node) {
		if (tmp_node->idle_cb == idle_node->idle_cb) {
			list_del(&tmp_node->node);
			ret = 0;
			break;
		}
	}
	spin_unlock(&idle_list_lock);

	return ret;
}
EXPORT_SYMBOL(qcom_pmu_idle_unregister);

static int events_caching_enable(void)
{
	int ret = 0;
	unsigned int enable = 1;

	if (!qcom_pmu_inited)
		return -EPROBE_DEFER;

	if (!ops || !pmu_base)
		return ret;

#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
	ret = ops->set_param(ph, &enable, PMUMAP_ALGO_STR,
			     SET_ENABLE_CACHING, sizeof(enable));
#else
	ret = ops->set_cache_enable(ph, &enable);
#endif
	if (ret < 0)
		pr_err("failed to set cache enable tunable :%d\n", ret);

	return ret;
}
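
/*
 * configure_cpucp_map: tell CPUCP which hardware counter index backs each
 * shared event on every CPU in @mask. Slots that are not shared (or have no
 * perf event) are left as INVALID_ID, which CPUCP ignores. The per-CPU rows
 * are indexed by the physical (MPIDR-derived) CPU number.
 */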

static int configure_cpucp_map(cpumask_t mask)
{
	struct event_data *event;
	int i, cpu, ret = 0, cid;
	uint8_t pmu_map[MAX_NUM_CPUS][MAX_CPUCP_EVT];
	struct cpu_data *cpu_data;
#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
	int j;
	struct pmu_map_msg msg;
#endif

	if (!qcom_pmu_inited)
		return -EPROBE_DEFER;

	if (!ops)
		return ret;

	/*
	 * Only set the hw cntrs for cpus that are part of the cpumask passed
	 * in argument and cpucp_map events mask. Set rest of the memory with
	 * INVALID_ID which is ignored on cpucp side.
	 */
	memset(pmu_map, INVALID_ID, MAX_NUM_CPUS * MAX_CPUCP_EVT);
	for_each_cpu(cpu, &mask) {
		cpu_data = per_cpu(cpu_ev_data, cpu);
		for (i = 0; i < cpu_data->num_evs; i++) {
			event = &cpu_data->events[i];
			cid = event->cid;
			if (!is_event_shared(event) ||
			    is_amu_valid(event->amu_id) || !event->pevent ||
			    !cpumask_test_cpu(cpu, to_cpumask(&cpucp_map[cid].cpus)))
				continue;
			pmu_map[phys_cpu[cpu]][cid] = event->pevent->hw.idx;
		}
	}

#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
	for (i = 0; i < MAX_NUM_CPUS; i++) {
		for (j = 0; j < MAX_CPUCP_EVT; j++)
			msg.hw_cntrs[i][j] = pmu_map[i][j];
	}
	ret = ops->set_param(ph, &msg, PMUMAP_ALGO_STR, SET_PMU_MAP, sizeof(msg));
#else
	ret = ops->set_pmu_map(ph, pmu_map);
#endif
	if (ret < 0)
		pr_err("failed to set pmu map :%d\n", ret);

	return ret;
}
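
/*
 * Idle-entry vendor hook: snapshot all valid events for the CPU entering
 * idle, cache the counts, and hand the snapshot to every registered idle
 * notification client. The exit hook below simply clears the idle flag.
 */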

static void qcom_pmu_idle_enter_notif(void *unused, int *state,
				      struct cpuidle_device *dev)
{
	struct cpu_data *cpu_data = per_cpu(cpu_ev_data, dev->cpu);
	struct qcom_pmu_data pmu_data;
	struct event_data *ev;
	struct qcom_pmu_notif_node *idle_node;
	int i, cnt = 0;
	unsigned long flags;

	spin_lock_irqsave(&cpu_data->read_lock, flags);
	if (cpu_data->is_idle || cpu_data->is_hp || cpu_data->is_pc) {
		spin_unlock_irqrestore(&cpu_data->read_lock, flags);
		return;
	}
	cpu_data->is_idle = true;
	atomic_inc(&cpu_data->read_cnt);
	spin_unlock_irqrestore(&cpu_data->read_lock, flags);

	for (i = 0; i < cpu_data->num_evs; i++) {
		ev = &cpu_data->events[i];
		if (!is_event_valid(ev))
			continue;
		ev->cached_count = read_event(ev, true);
		pmu_data.event_ids[cnt] = ev->event_id;
		pmu_data.ev_data[cnt] = ev->cached_count;
		cnt++;
	}
	atomic_dec(&cpu_data->read_cnt);
	pmu_data.num_evs = cnt;

	/* send snapshot of pmu data to all registered idle clients */
	list_for_each_entry(idle_node, &idle_notif_list, node)
		idle_node->idle_cb(&pmu_data, dev->cpu, *state);
}

static void qcom_pmu_idle_exit_notif(void *unused, int state,
				     struct cpuidle_device *dev)
{
	struct cpu_data *cpu_data = per_cpu(cpu_ev_data, dev->cpu);

	cpu_data->is_idle = false;
}
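
/*
 * CPU PM notifier: on power-collapse entry, cache the shared events and
 * mirror them into the per-CPU CPUCP counter region so CPUCP can keep using
 * the values while the PMU is off; the valid flag gates CPUCP's reads. On
 * CPU_PM_EXIT the valid flag is cleared and normal hardware reads resume.
 */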

static int memlat_pm_notif(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	int cpu = smp_processor_id();
	struct cpu_data *cpu_data = per_cpu(cpu_ev_data, cpu);
	struct event_data *ev;
	int i, cid, aid;
	u32 count;
	bool pmu_valid = false;
	bool read_ev = true;
	struct cpucp_pmu_ctrs *base = pmu_base + (sizeof(struct cpucp_pmu_ctrs) * cpu);
	unsigned long flags;

	/* Exit if cpu is in hotplug */
	spin_lock_irqsave(&cpu_data->read_lock, flags);
	if (cpu_data->is_hp) {
		spin_unlock_irqrestore(&cpu_data->read_lock, flags);
		return NOTIFY_OK;
	}

	if (action == CPU_PM_EXIT) {
		if (pmu_base)
			writel_relaxed(0, &base->valid);
		cpu_data->is_pc = false;
		spin_unlock_irqrestore(&cpu_data->read_lock, flags);
		return NOTIFY_OK;
	}

	if (cpu_data->is_idle || cpu_data->is_pc)
		read_ev = false;
	else
		atomic_inc(&cpu_data->read_cnt);
	cpu_data->is_pc = true;
	spin_unlock_irqrestore(&cpu_data->read_lock, flags);

	if (!pmu_base)
		goto dec_read_cnt;

	for (i = 0; i < cpu_data->num_evs; i++) {
		ev = &cpu_data->events[i];
		cid = ev->cid;
		aid = ev->amu_id;
		if (!is_event_valid(ev) || !is_event_shared(ev))
			continue;
		if (read_ev)
			ev->cached_count = read_event(ev, true);
		/* Store pmu values in allocated cpucp pmu region */
		pmu_valid = true;
		count = cached_count_value(ev, ev->cached_count, is_amu_valid(aid));
		writel_relaxed(count, &base->evctrs[cid]);
	}

	/* Set valid cache flag to allow cpucp to read from this memory location */
	if (pmu_valid)
		writel_relaxed(1, &base->valid);

dec_read_cnt:
	if (read_ev)
		atomic_dec(&cpu_data->read_cnt);

	return NOTIFY_OK;
}

static struct notifier_block memlat_event_pm_nb = {
	.notifier_call = memlat_pm_notif,
};

#if IS_ENABLED(CONFIG_HOTPLUG_CPU)
static int qcom_pmu_hotplug_coming_up(unsigned int cpu)
{
	struct perf_event_attr *attr = alloc_attr();
	struct cpu_data *cpu_data = per_cpu(cpu_ev_data, cpu);
	int i, ret = 0;
	unsigned long flags;
	struct event_data *ev;
	struct cpucp_pmu_ctrs *base = pmu_base + (sizeof(struct cpucp_pmu_ctrs) * cpu);
	cpumask_t mask;

	if (!attr)
		return -ENOMEM;

	if (!qcom_pmu_inited)
		goto out;

	for (i = 0; i < cpu_data->num_evs; i++) {
		ev = &cpu_data->events[i];
		ret = set_event(ev, cpu, attr);
		if (ret < 0) {
			pr_err("event %d not set for cpu %d ret %d\n",
			       ev->event_id, cpu, ret);
			break;
		}
	}

	cpumask_clear(&mask);
	cpumask_set_cpu(cpu, &mask);
	configure_cpucp_map(mask);

	/* Set valid as 0 as exiting hotplug */
	if (pmu_base)
		writel_relaxed(0, &base->valid);

	spin_lock_irqsave(&cpu_data->read_lock, flags);
	cpu_data->is_hp = false;
	spin_unlock_irqrestore(&cpu_data->read_lock, flags);

out:
	kfree(attr);
	return 0;
}
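
/*
 * Hotplug teardown: mark the CPU as hotplugged, wait for in-flight readers
 * to drain, publish the final cached counts for shared events to the CPUCP
 * region and release the perf events. The counters are recreated by
 * qcom_pmu_hotplug_coming_up() when the CPU returns.
 */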

static int qcom_pmu_hotplug_going_down(unsigned int cpu)
{
	struct cpu_data *cpu_data = per_cpu(cpu_ev_data, cpu);
	struct event_data *ev;
	int i, cid, aid;
	unsigned long flags;
	bool pmu_valid = false;
	u32 count;
	struct cpucp_pmu_ctrs *base = pmu_base + (sizeof(struct cpucp_pmu_ctrs) * cpu);

	if (!qcom_pmu_inited)
		return 0;

	spin_lock_irqsave(&cpu_data->read_lock, flags);
	cpu_data->is_hp = true;
	spin_unlock_irqrestore(&cpu_data->read_lock, flags);
	while (atomic_read(&cpu_data->read_cnt) > 0)
		udelay(10);

	for (i = 0; i < cpu_data->num_evs; i++) {
		ev = &cpu_data->events[i];
		cid = ev->cid;
		aid = ev->amu_id;
		if (!is_event_valid(ev))
			continue;
		ev->cached_count = read_event(ev, false);
		/* Store pmu values in allocated cpucp pmu region */
		if (pmu_base && is_event_shared(ev)) {
			pmu_valid = true;
			count = cached_count_value(ev, ev->cached_count, is_amu_valid(aid));
			writel_relaxed(count, &base->evctrs[cid]);
		}
		delete_event(ev);
	}

	if (pmu_valid)
		writel_relaxed(1, &base->valid);

	return 0;
}

static int qcom_pmu_cpu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "QCOM_PMU",
						   qcom_pmu_hotplug_coming_up,
						   qcom_pmu_hotplug_going_down);
	if (ret < 0)
		pr_err("qcom_pmu: CPU hotplug notifier error: %d\n", ret);

	return ret;
}
#else
static int qcom_pmu_cpu_hp_init(void) { return 0; }
#endif
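
/*
 * cache_counters: take a final snapshot of every valid event on every CPU
 * and mirror shared events into the CPUCP region. Used just before the perf
 * events are torn down so consumers keep seeing the last known counts.
 */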

static void cache_counters(void)
{
	struct cpu_data *cpu_data;
	int i, cid, aid;
	unsigned int cpu;
	struct event_data *event;
	struct cpucp_pmu_ctrs *base;
	bool pmu_valid;
	u32 count;

	for_each_possible_cpu(cpu) {
		cpu_data = per_cpu(cpu_ev_data, cpu);
		base = pmu_base + (sizeof(struct cpucp_pmu_ctrs) * cpu);
		pmu_valid = false;
		for (i = 0; i < cpu_data->num_evs; i++) {
			event = &cpu_data->events[i];
			cid = event->cid;
			aid = event->amu_id;
			if (!is_event_valid(event))
				continue;
			read_event(event, false);
			/* Store pmu values in allocated cpucp pmu region */
			if (pmu_base && is_event_shared(event)) {
				pmu_valid = true;
				count = cached_count_value(event, event->cached_count,
							   is_amu_valid(aid));
				writel_relaxed(count, &base->evctrs[cid]);
			}
		}
		if (pmu_valid)
			writel_relaxed(1, &base->valid);
	}
}

static void delete_events(void)
{
	int i;
	unsigned int cpu;
	struct cpu_data *cpu_data;
	struct event_data *event;
	unsigned long flags;

	if (cpuhp_state > 0)
		cpuhp_remove_state_nocalls(cpuhp_state);
	unregister_trace_android_vh_cpu_idle_enter(qcom_pmu_idle_enter_notif, NULL);
	unregister_trace_android_vh_cpu_idle_exit(qcom_pmu_idle_exit_notif, NULL);
	cpu_pm_unregister_notifier(&memlat_event_pm_nb);

	for_each_possible_cpu(cpu) {
		cpu_data = per_cpu(cpu_ev_data, cpu);
		spin_lock_irqsave(&cpu_data->read_lock, flags);
		cpu_data->is_hp = true;
		cpu_data->is_idle = true;
		cpu_data->is_pc = true;
		spin_unlock_irqrestore(&cpu_data->read_lock, flags);
	}

	for_each_possible_cpu(cpu) {
		cpu_data = per_cpu(cpu_ev_data, cpu);
		while (atomic_read(&cpu_data->read_cnt) > 0)
			udelay(10);
		for (i = 0; i < cpu_data->num_evs; i++) {
			event = &cpu_data->events[i];
			if (!is_event_valid(event))
				continue;
			delete_event(event);
		}
	}
}

static void unload_pmu_counters(void)
{
	if (!qcom_pmu_inited || !pmu_counters_enabled)
		return;

	cache_counters();
	delete_events();
	pr_info("Disabled all perf counters\n");
	pmu_counters_enabled = false;
}
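
/*
 * setup_events: create the configured perf/AMU events on every possible CPU
 * with hotplug held off, register the dynamic hotplug state and hook up the
 * idle and CPU PM notifiers. Individual event failures are tolerated (the
 * event is simply dropped); only -EPROBE_DEFER aborts and rolls back.
 */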

static int setup_events(void)
{
	struct perf_event_attr *attr = alloc_attr();
	struct cpu_data *cpu_data;
	int i, ret = 0;
	unsigned int cpu;
	struct event_data *event;
	unsigned long flags;

	if (!attr)
		return -ENOMEM;

	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		cpu_data = per_cpu(cpu_ev_data, cpu);
		for (i = 0; i < cpu_data->num_evs; i++) {
			event = &cpu_data->events[i];
			ret = set_event(event, cpu, attr);
			if (ret < 0) {
				pr_err("event %d not set for cpu %d ret %d\n",
				       event->event_id, cpu, ret);
				event->event_id = 0;
				/*
				 * Only return error for -EPROBE_DEFER. Clear
				 * ret for all other cases as it is okay for
				 * some events to fail.
				 */
				if (ret == -EPROBE_DEFER)
					goto cleanup_events;
				else
					ret = 0;
			}
		}
		spin_lock_irqsave(&cpu_data->read_lock, flags);
		cpu_data->is_hp = !cpumask_test_cpu(cpu, cpu_online_mask);
		cpu_data->is_idle = false;
		cpu_data->is_pc = false;
		spin_unlock_irqrestore(&cpu_data->read_lock, flags);
	}
	cpuhp_state = qcom_pmu_cpu_hp_init();
	if (cpuhp_state < 0) {
		ret = cpuhp_state;
		pr_err("qcom pmu driver failed to initialize hotplug: %d\n", ret);
		goto out;
	}
	goto out;

cleanup_events:
	for_each_possible_cpu(cpu) {
		cpu_data = per_cpu(cpu_ev_data, cpu);
		for (i = 0; i < cpu_data->num_evs; i++) {
			event = &cpu_data->events[i];
			delete_event(event);
		}
	}
out:
	cpus_read_unlock();
	if (ret != -EPROBE_DEFER && ret != cpuhp_state) {
		register_trace_android_vh_cpu_idle_enter(qcom_pmu_idle_enter_notif, NULL);
		register_trace_android_vh_cpu_idle_exit(qcom_pmu_idle_exit_notif, NULL);
		cpu_pm_register_notifier(&memlat_event_pm_nb);
	}
	kfree(attr);

	return ret;
}

static void load_pmu_counters(void)
{
	int ret;

	if (pmu_counters_enabled)
		return;

	ret = setup_events();
	if (ret < 0) {
		pr_err("Error setting up counters %d\n", ret);
		return;
	}
	configure_cpucp_map(*cpu_possible_mask);
	pmu_counters_enabled = true;
	pr_info("Enabled all perf counters\n");
}

static void get_mpidr_cpu(void *cpu)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;

	*((uint32_t *)cpu) = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}
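
/*
 * cpucp_pmu_init: called once an SCMI device is available. Resolves the
 * vendor protocol ops, records each CPU's physical (MPIDR) id, pushes the
 * counter map to CPUCP and enables event caching. On failure ops is cleared
 * so no further SCMI calls are attempted.
 */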

int cpucp_pmu_init(struct scmi_device *sdev)
{
	int ret = 0;
	uint32_t cpu, pcpu;

	if (!sdev || !sdev->handle)
		return -EINVAL;

#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
	ops = sdev->handle->devm_protocol_get(sdev, QCOM_SCMI_VENDOR_PROTOCOL, &ph);
#else
	ops = sdev->handle->devm_protocol_get(sdev, SCMI_PMU_PROTOCOL, &ph);
#endif
	if (IS_ERR(ops)) {
		ret = PTR_ERR(ops);
		ops = NULL;
		return ret;
	}

	for_each_possible_cpu(cpu) {
		smp_call_function_single(cpu, get_mpidr_cpu,
					 &pcpu, true);
		phys_cpu[cpu] = pcpu;
	}

	/*
	 * If communication with cpucp doesn't succeed here the device memory
	 * will be de-allocated. Make ops NULL to avoid further scmi calls.
	 */
	ret = configure_cpucp_map(*cpu_possible_mask);
	if (ret < 0) {
		ops = NULL;
		return ret;
	}

	ret = events_caching_enable();
	if (ret < 0)
		ops = NULL;

	return ret;
}
EXPORT_SYMBOL(cpucp_pmu_init);

static int configure_pmu_event(u32 event_id, int amu_id, int cid, int cpu)
{
	struct cpu_data *cpu_data;
	struct event_data *event;

	if (!event_id || cpu >= num_possible_cpus())
		return -EINVAL;

	cpu_data = per_cpu(cpu_ev_data, cpu);
	if (cpu_data->num_evs >= MAX_PMU_EVS)
		return -ENOSPC;

	event = &cpu_data->events[cpu_data->num_evs];
	event->event_id = event_id;
	event->amu_id = amu_id;
	event->cid = cid;
	cpu_data->num_evs++;

	return 0;
}

#define PMU_TBL_PROP "qcom,pmu-events-tbl"
#define NUM_COL 4
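
/*
 * init_pmu_events: parse the "qcom,pmu-events-tbl" DT property. Each row is
 * NUM_COL (4) u32 cells: <event_id cpu_mask amu_id cpucp_id>. event_id is the
 * raw PMU event, cpu_mask selects the CPUs it is configured on, amu_id (if in
 * the valid AMU range) makes the event AMU-backed, and a valid cpucp_id marks
 * it as shared with CPUCP. The optional "qcom,long-counter" property enables
 * 64-bit PMU counters.
 */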

static int init_pmu_events(struct device *dev)
{
	struct device_node *of_node = dev->of_node;
	int ret, len, i, j, cpu;
	u32 data = 0, event_id, cid;
	unsigned long cpus;
	int amu_id;

	if (of_find_property(of_node, "qcom,long-counter", &len))
		pmu_long_counter = true;

	if (!of_find_property(of_node, PMU_TBL_PROP, &len))
		return -ENODEV;
	len /= sizeof(data);
	if (len % NUM_COL || len == 0)
		return -EINVAL;
	len /= NUM_COL;
	if (len >= MAX_PMU_EVS)
		return -ENOSPC;

	for (i = 0, j = 0; i < len; i++, j += NUM_COL) {
		ret = of_property_read_u32_index(of_node, PMU_TBL_PROP, j,
						 &event_id);
		if (ret < 0 || !event_id)
			return -EINVAL;
		ret = of_property_read_u32_index(of_node, PMU_TBL_PROP, j + 1,
						 &data);
		if (ret < 0 || !data)
			return -EINVAL;
		cpus = (unsigned long)data;
		ret = of_property_read_u32_index(of_node, PMU_TBL_PROP, j + 2,
						 &amu_id);
		if (ret < 0)
			return -EINVAL;
		ret = of_property_read_u32_index(of_node, PMU_TBL_PROP, j + 3,
						 &cid);
		if (ret < 0)
			return -EINVAL;
		for_each_cpu(cpu, to_cpumask(&cpus)) {
			ret = configure_pmu_event(event_id, amu_id, cid, cpu);
			if (ret < 0)
				return ret;
		}
		if (is_cid_valid(cid)) {
			cpucp_map[cid].shared = true;
			cpucp_map[cid].cpus = cpus;
		}
		dev_dbg(dev, "entry=%d: ev=%u, cpus=%lu cpucp id=%u amu_id=%d\n",
			i, event_id, cpus, cid, amu_id);
	}

	return 0;
}

struct qcom_pmu_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
			char *buf);
	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
			 const char *buf, size_t count);
};

#define to_pmu_attr(_attr) \
	container_of(_attr, struct qcom_pmu_attr, attr)
#define PMU_ATTR_RW(_name) \
static struct qcom_pmu_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

static ssize_t show_enable_counters(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", pmu_counters_enabled);
}

#define DISABLE_MAGIC "DEADBEEF"
#define ENABLE_MAGIC "BEEFDEAD"
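
/*
 * Writing the magic strings toggles the counters at runtime, e.g. (path
 * assuming the "pmu_lib" kobject registered under the cpu subsystem below):
 *
 *	echo DEADBEEF > /sys/devices/system/cpu/pmu_lib/enable_counters
 *	echo BEEFDEAD > /sys/devices/system/cpu/pmu_lib/enable_counters
 */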

static ssize_t store_enable_counters(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	if (sysfs_streq(buf, ENABLE_MAGIC))
		load_pmu_counters();
	else if (sysfs_streq(buf, DISABLE_MAGIC))
		unload_pmu_counters();

	return count;
}

static ssize_t show_enable_trace(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", pmu_enable_trace);
}

static ssize_t store_enable_trace(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	unsigned int var;
	int ret;

	if (!ops)
		return -ENODEV;

	ret = kstrtouint(buf, 10, &var);
	if (ret < 0)
		return ret;

#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
	ret = ops->set_param(ph, &var, PMUMAP_ALGO_STR, SET_ENABLE_TRACE, sizeof(var));
#else
	ret = ops->set_enable_trace(ph, &var);
#endif
	if (ret < 0) {
		pr_err("failed to set enable_trace tunable: %d\n", ret);
		return ret;
	}
	pmu_enable_trace = var;

	return count;
}

PMU_ATTR_RW(enable_counters);
PMU_ATTR_RW(enable_trace);

static struct attribute *pmu_settings_attrs[] = {
	&enable_counters.attr,
	&enable_trace.attr,
	NULL,
};
ATTRIBUTE_GROUPS(pmu_settings);

static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
			 char *buf)
{
	struct qcom_pmu_attr *pmu_attr = to_pmu_attr(attr);
	ssize_t ret = -EIO;

	if (pmu_attr->show)
		ret = pmu_attr->show(kobj, attr, buf);

	return ret;
}

static ssize_t attr_store(struct kobject *kobj, struct attribute *attr,
			  const char *buf, size_t count)
{
	struct qcom_pmu_attr *pmu_attr = to_pmu_attr(attr);
	ssize_t ret = -EIO;

	if (pmu_attr->store)
		ret = pmu_attr->store(kobj, attr, buf, count);

	return ret;
}

static const struct sysfs_ops pmu_sysfs_ops = {
	.show = attr_show,
	.store = attr_store,
};

static struct kobj_type pmu_settings_ktype = {
	.sysfs_ops = &pmu_sysfs_ops,
	.default_groups = pmu_settings_groups,
};
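
/*
 * Probe: map the optional CPUCP counter region ("pmu-base"), allocate the
 * per-CPU event state, parse the DT event table, expose the sysfs controls
 * and create the events. When the QTI SCMI vendor protocol is available the
 * CPUCP side is initialized as well; a missing SCMI device only defers probe.
 */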

static int qcom_pmu_driver_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret = 0, idx, len;
	unsigned int cpu;
	struct cpu_data *cpu_data;
	struct resource res;
#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
	int cpucp_ret = 0;
	struct scmi_device *scmi_dev;

	scmi_dev = get_qcom_scmi_device();
	if (IS_ERR(scmi_dev)) {
		ret = PTR_ERR(scmi_dev);
		if (ret == -EPROBE_DEFER)
			return ret;
		dev_err(dev, "Error getting scmi_dev ret = %d\n", ret);
	}
#endif

	if (!pmu_base) {
		idx = of_property_match_string(dev->of_node, "reg-names", "pmu-base");
		if (idx < 0) {
			dev_dbg(dev, "pmu base not found\n");
			goto skip_pmu;
		}
		ret = of_address_to_resource(dev->of_node, idx, &res);
		if (ret < 0) {
			dev_err(dev, "failed to get resource ret %d\n", ret);
			goto skip_pmu;
		}
		pmu_base = devm_ioremap(dev, res.start, resource_size(&res));
		if (!pmu_base)
			goto skip_pmu;
		/* Zero out the pmu memory region */
		memset_io(pmu_base, 0, resource_size(&res));
	}

skip_pmu:
	if (of_find_property(dev->of_node, "qcom,ignore-llcc-setup", &len)) {
		dev_dbg(dev, "Ignoring llcc setup\n");
		llcc_ignore_setup = true;
	}

	for_each_possible_cpu(cpu) {
		cpu_data = devm_kzalloc(dev, sizeof(*cpu_data), GFP_KERNEL);
		if (!cpu_data)
			return -ENOMEM;
		spin_lock_init(&cpu_data->read_lock);
		atomic_set(&cpu_data->read_cnt, 0);
		per_cpu(cpu_ev_data, cpu) = cpu_data;
	}

	ret = init_pmu_events(dev);
	if (ret < 0) {
		dev_err(dev, "failed to initialize pmu events: %d\n", ret);
		return ret;
	}

	ret = kobject_init_and_add(&pmu_kobj, &pmu_settings_ktype,
				   &cpu_subsys.dev_root->kobj, "pmu_lib");
	if (ret < 0) {
		dev_err(dev, "failed to init pmu counters kobj: %d\n", ret);
		kobject_put(&pmu_kobj);
		return ret;
	}

	ret = setup_events();
	if (ret < 0) {
		dev_err(dev, "failed to setup all pmu/amu events: %d\n", ret);
		kobject_put(&pmu_kobj);
		return ret;
	}

	qcom_pmu_inited = true;

#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
	if (!IS_ERR(scmi_dev)) {
		cpucp_ret = cpucp_pmu_init(scmi_dev);
		if (cpucp_ret < 0)
			dev_err(dev, "Err during cpucp_pmu_init ret = %d\n", cpucp_ret);
	}
#endif

	return ret;
}

static int qcom_pmu_driver_remove(struct platform_device *pdev)
{
	qcom_pmu_inited = false;
	delete_events();

	return 0;
}

static const struct of_device_id pmu_match_table[] = {
	{ .compatible = "qcom,pmu" },
	{}
};

static struct platform_driver qcom_pmu_driver = {
	.probe = qcom_pmu_driver_probe,
	.remove = qcom_pmu_driver_remove,
	.driver = {
		.name = "qcom-pmu",
		.of_match_table = pmu_match_table,
		.suppress_bind_attrs = true,
	},
};

module_platform_driver(qcom_pmu_driver);

#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL)
MODULE_SOFTDEP("pre: qcom_scmi_client");
#endif
MODULE_DESCRIPTION("QCOM PMU Driver");
MODULE_LICENSE("GPL");