hw_breakpoint.c

// SPDX-License-Identifier: GPL-2.0+

/*
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <[email protected]>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <[email protected]>
 *          K.Prasad <[email protected]>
 *          Frederic Weisbecker <[email protected]>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/hw_breakpoint.h>

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/percpu-rwsem.h>
#include <linux/percpu.h>
#include <linux/rhashtable.h>
#include <linux/sched.h>
#include <linux/slab.h>

/*
 * Data structure to track the total uses of N slots across tasks or CPUs;
 * bp_slots_histogram::count[N] is the number of assigned N+1 breakpoint slots.
 */
struct bp_slots_histogram {
#ifdef hw_breakpoint_slots
	atomic_t count[hw_breakpoint_slots(0)];
#else
	atomic_t *count;
#endif
};
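
/*
 * For example, two tasks that each have exactly one data breakpoint pinned on
 * a CPU are accounted as tsk_pinned.count[0] == 2 for that CPU, while a single
 * task holding three slots of a type contributes to count[2].
 */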

/*
 * Per-CPU constraints data.
 */
struct bp_cpuinfo {
	/* Number of pinned CPU breakpoints in a CPU. */
	unsigned int cpu_pinned;
	/* Histogram of pinned task breakpoints in a CPU. */
	struct bp_slots_histogram tsk_pinned;
};

static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);

static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{
	return per_cpu_ptr(bp_cpuinfo + type, cpu);
}

/* Number of pinned CPU breakpoints globally. */
static struct bp_slots_histogram cpu_pinned[TYPE_MAX];
/* Number of pinned CPU-independent task breakpoints. */
static struct bp_slots_histogram tsk_pinned_all[TYPE_MAX];

/* Keep track of the breakpoints attached to tasks */
static struct rhltable task_bps_ht;
static const struct rhashtable_params task_bps_ht_params = {
	.head_offset = offsetof(struct hw_perf_event, bp_list),
	.key_offset = offsetof(struct hw_perf_event, target),
	.key_len = sizeof_field(struct hw_perf_event, target),
	.automatic_shrinking = true,
};

static bool constraints_initialized __ro_after_init;

/*
 * Synchronizes accesses to the per-CPU constraints; the locking rules are:
 *
 *  1. Atomic updates to bp_cpuinfo::tsk_pinned only require a held read-lock
 *     (due to bp_slots_histogram::count being atomic, no updates are lost).
 *
 *  2. Holding a write-lock is required for computations that require a
 *     stable snapshot of all bp_cpuinfo::tsk_pinned.
 *
 *  3. In all other cases, non-atomic accesses require the appropriately held
 *     lock (read-lock for read-only accesses; write-lock for reads/writes).
 */
DEFINE_STATIC_PERCPU_RWSEM(bp_cpuinfo_sem);

/*
 * Return mutex to serialize accesses to per-task lists in task_bps_ht. Since
 * rhltable synchronizes concurrent insertions/deletions, independent tasks may
 * insert/delete concurrently; therefore, a mutex per task is sufficient.
 *
 * Uses task_struct::perf_event_mutex, to avoid extending task_struct with a
 * hw_breakpoint-only mutex, which may be infrequently used. The caveat here is
 * that hw_breakpoint may contend with per-task perf event list management. The
 * assumption is that perf usecases involving hw_breakpoints are very unlikely
 * to result in unnecessary contention.
 */
static inline struct mutex *get_task_bps_mutex(struct perf_event *bp)
{
	struct task_struct *tsk = bp->hw.target;

	return tsk ? &tsk->perf_event_mutex : NULL;
}

static struct mutex *bp_constraints_lock(struct perf_event *bp)
{
	struct mutex *tsk_mtx = get_task_bps_mutex(bp);

	if (tsk_mtx) {
		/*
		 * Fully analogous to the perf_try_init_event() nesting
		 * argument in the comment near perf_event_ctx_lock_nested();
		 * this child->perf_event_mutex cannot ever deadlock against
		 * the parent->perf_event_mutex usage from
		 * perf_event_task_{en,dis}able().
		 *
		 * Specifically, inherited events will never occur on
		 * ->perf_event_list.
		 */
		mutex_lock_nested(tsk_mtx, SINGLE_DEPTH_NESTING);
		percpu_down_read(&bp_cpuinfo_sem);
	} else {
		percpu_down_write(&bp_cpuinfo_sem);
	}

	return tsk_mtx;
}

static void bp_constraints_unlock(struct mutex *tsk_mtx)
{
	if (tsk_mtx) {
		percpu_up_read(&bp_cpuinfo_sem);
		mutex_unlock(tsk_mtx);
	} else {
		percpu_up_write(&bp_cpuinfo_sem);
	}
}

static bool bp_constraints_is_locked(struct perf_event *bp)
{
	struct mutex *tsk_mtx = get_task_bps_mutex(bp);

	return percpu_is_write_locked(&bp_cpuinfo_sem) ||
	       (tsk_mtx ? mutex_is_locked(tsk_mtx) :
			  percpu_is_read_locked(&bp_cpuinfo_sem));
}

static inline void assert_bp_constraints_lock_held(struct perf_event *bp)
{
	struct mutex *tsk_mtx = get_task_bps_mutex(bp);

	if (tsk_mtx)
		lockdep_assert_held(tsk_mtx);
	lockdep_assert_held(&bp_cpuinfo_sem);
}

#ifdef hw_breakpoint_slots
/*
 * Number of breakpoint slots is constant, and the same for all types.
 */
static_assert(hw_breakpoint_slots(TYPE_INST) == hw_breakpoint_slots(TYPE_DATA));
static inline int hw_breakpoint_slots_cached(int type)	{ return hw_breakpoint_slots(type); }
static inline int init_breakpoint_slots(void)		{ return 0; }
#else
/*
 * Dynamic number of breakpoint slots.
 */
static int __nr_bp_slots[TYPE_MAX] __ro_after_init;

static inline int hw_breakpoint_slots_cached(int type)
{
	return __nr_bp_slots[type];
}

static __init bool
bp_slots_histogram_alloc(struct bp_slots_histogram *hist, enum bp_type_idx type)
{
	hist->count = kcalloc(hw_breakpoint_slots_cached(type), sizeof(*hist->count), GFP_KERNEL);
	return hist->count;
}

static __init void bp_slots_histogram_free(struct bp_slots_histogram *hist)
{
	kfree(hist->count);
}

static __init int init_breakpoint_slots(void)
{
	int i, cpu, err_cpu;

	for (i = 0; i < TYPE_MAX; i++)
		__nr_bp_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			struct bp_cpuinfo *info = get_bp_info(cpu, i);

			if (!bp_slots_histogram_alloc(&info->tsk_pinned, i))
				goto err;
		}
	}
	for (i = 0; i < TYPE_MAX; i++) {
		if (!bp_slots_histogram_alloc(&cpu_pinned[i], i))
			goto err;
		if (!bp_slots_histogram_alloc(&tsk_pinned_all[i], i))
			goto err;
	}

	return 0;
err:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			bp_slots_histogram_free(&get_bp_info(err_cpu, i)->tsk_pinned);
		if (err_cpu == cpu)
			break;
	}
	for (i = 0; i < TYPE_MAX; i++) {
		bp_slots_histogram_free(&cpu_pinned[i]);
		bp_slots_histogram_free(&tsk_pinned_all[i]);
	}

	return -ENOMEM;
}
#endif

static inline void
bp_slots_histogram_add(struct bp_slots_histogram *hist, int old, int val)
{
	const int old_idx = old - 1;
	const int new_idx = old_idx + val;

	if (old_idx >= 0)
		WARN_ON(atomic_dec_return_relaxed(&hist->count[old_idx]) < 0);
	if (new_idx >= 0)
		WARN_ON(atomic_inc_return_relaxed(&hist->count[new_idx]) < 0);
}
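
/*
 * For example, when a task that already holds two pinned breakpoints of a type
 * gains a third (old == 2, val == 1), the helper above decrements count[1] and
 * increments count[2]: the task moves from the "2 slots" to the "3 slots"
 * bucket of the histogram.
 */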

static int
bp_slots_histogram_max(struct bp_slots_histogram *hist, enum bp_type_idx type)
{
	for (int i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
		const int count = atomic_read(&hist->count[i]);

		/* Catch unexpected writers; we want a stable snapshot. */
		ASSERT_EXCLUSIVE_WRITER(hist->count[i]);
		if (count > 0)
			return i + 1;
		WARN(count < 0, "inconsistent breakpoint slots histogram");
	}

	return 0;
}

static int
bp_slots_histogram_max_merge(struct bp_slots_histogram *hist1, struct bp_slots_histogram *hist2,
			     enum bp_type_idx type)
{
	for (int i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
		const int count1 = atomic_read(&hist1->count[i]);
		const int count2 = atomic_read(&hist2->count[i]);

		/* Catch unexpected writers; we want a stable snapshot. */
		ASSERT_EXCLUSIVE_WRITER(hist1->count[i]);
		ASSERT_EXCLUSIVE_WRITER(hist2->count[i]);
		if (count1 + count2 > 0)
			return i + 1;
		WARN(count1 < 0, "inconsistent breakpoint slots histogram");
		WARN(count2 < 0, "inconsistent breakpoint slots histogram");
	}

	return 0;
}

#ifndef hw_breakpoint_weight
static inline int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}
#endif
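
/*
 * HW_BREAKPOINT_RW is HW_BREAKPOINT_R | HW_BREAKPOINT_W, so the helper below
 * accounts both read and write (data) watchpoints against TYPE_DATA slots,
 * while execute (HW_BREAKPOINT_X) breakpoints map to TYPE_INST.
 */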
static inline enum bp_type_idx find_slot_idx(u64 bp_type)
{
	if (bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;
	return TYPE_INST;
}

/*
 * Return the maximum number of pinned breakpoints a task has in this CPU.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	struct bp_slots_histogram *tsk_pinned = &get_bp_info(cpu, type)->tsk_pinned;

	/*
	 * At this point we want to have acquired the bp_cpuinfo_sem as a
	 * writer to ensure that there are no concurrent writers in
	 * toggle_bp_task_slot() to tsk_pinned, and we get a stable snapshot.
	 */
	lockdep_assert_held_write(&bp_cpuinfo_sem);
	return bp_slots_histogram_max_merge(tsk_pinned, &tsk_pinned_all[type], type);
}

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 *
 * If @cpu is -1, but the result of task_bp_pinned() is not CPU-independent,
 * returns a negative value.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct rhlist_head *head, *pos;
	struct perf_event *iter;
	int count = 0;

	/*
	 * We need a stable snapshot of the per-task breakpoint list.
	 */
	assert_bp_constraints_lock_held(bp);

	rcu_read_lock();
	head = rhltable_lookup(&task_bps_ht, &bp->hw.target, task_bps_ht_params);
	if (!head)
		goto out;

	rhl_for_each_entry_rcu(iter, pos, head, hw.bp_list) {
		if (find_slot_idx(iter->attr.bp_type) != type)
			continue;

		if (iter->cpu >= 0) {
			if (cpu == -1) {
				count = -1;
				goto out;
			} else if (cpu != iter->cpu)
				continue;
		}

		count += hw_breakpoint_weight(iter);
	}

out:
	rcu_read_unlock();
	return count;
}

static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
	if (bp->cpu >= 0)
		return cpumask_of(bp->cpu);
	return cpu_possible_mask;
}

/*
 * Returns the max pinned breakpoint slots in a given
 * CPU (cpu > -1) or across all of them (cpu = -1).
 */
static int
max_bp_pinned_slots(struct perf_event *bp, enum bp_type_idx type)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int pinned_slots = 0;
	int cpu;

	if (bp->hw.target && bp->cpu < 0) {
		int max_pinned = task_bp_pinned(-1, bp, type);

		if (max_pinned >= 0) {
			/*
			 * Fast path: task_bp_pinned() is CPU-independent and
			 * returns the same value for any CPU.
			 */
			max_pinned += bp_slots_histogram_max(&cpu_pinned[type], type);
			return max_pinned;
		}
	}

	for_each_cpu(cpu, cpumask) {
		struct bp_cpuinfo *info = get_bp_info(cpu, type);
		int nr;

		nr = info->cpu_pinned;
		if (!bp->hw.target)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		pinned_slots = max(nr, pinned_slots);
	}

	return pinned_slots;
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static int
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, int weight)
{
	int cpu, next_tsk_pinned;

	if (!enable)
		weight = -weight;

	if (!bp->hw.target) {
		/*
		 * Update the pinned CPU slots, in per-CPU bp_cpuinfo and in the
		 * global histogram.
		 */
		struct bp_cpuinfo *info = get_bp_info(bp->cpu, type);

		lockdep_assert_held_write(&bp_cpuinfo_sem);
		bp_slots_histogram_add(&cpu_pinned[type], info->cpu_pinned, weight);
		info->cpu_pinned += weight;
		return 0;
	}

	/*
	 * If bp->hw.target, tsk_pinned is only modified, but not used
	 * otherwise. We can permit concurrent updates as long as there are no
	 * other uses: having acquired bp_cpuinfo_sem as a reader allows
	 * concurrent updates here. Uses of tsk_pinned will require acquiring
	 * bp_cpuinfo_sem as a writer to stabilize tsk_pinned's value.
	 */
	lockdep_assert_held_read(&bp_cpuinfo_sem);

	/*
	 * Update the pinned task slots, in per-CPU bp_cpuinfo and in the global
	 * histogram. We need to take care of 4 cases:
	 *
	 *  1. This breakpoint targets all CPUs (cpu < 0), and there may only
	 *     exist other task breakpoints targeting all CPUs. In this case we
	 *     can simply update the global slots histogram.
	 *
	 *  2. This breakpoint targets a specific CPU (cpu >= 0), but there may
	 *     only exist other task breakpoints targeting all CPUs.
	 *
	 *     a. On enable: remove the existing breakpoints from the global
	 *        slots histogram and use the per-CPU histogram.
	 *
	 *     b. On disable: re-insert the existing breakpoints into the global
	 *        slots histogram and remove from per-CPU histogram.
	 *
	 *  3. Some other existing task breakpoints target specific CPUs. Only
	 *     update the per-CPU slots histogram.
	 */

	if (!enable) {
		/*
		 * Remove before updating histograms so we can determine if this
		 * was the last task breakpoint for a specific CPU.
		 */
		int ret = rhltable_remove(&task_bps_ht, &bp->hw.bp_list, task_bps_ht_params);

		if (ret)
			return ret;
	}

	/*
	 * Note: If !enable, next_tsk_pinned will not count the to-be-removed breakpoint.
	 */
	next_tsk_pinned = task_bp_pinned(-1, bp, type);

	if (next_tsk_pinned >= 0) {
		if (bp->cpu < 0) { /* Case 1: fast path */
			if (!enable)
				next_tsk_pinned += hw_breakpoint_weight(bp);
			bp_slots_histogram_add(&tsk_pinned_all[type], next_tsk_pinned, weight);
		} else if (enable) { /* Case 2.a: slow path */
			/* Add existing to per-CPU histograms. */
			for_each_possible_cpu(cpu) {
				bp_slots_histogram_add(&get_bp_info(cpu, type)->tsk_pinned,
						       0, next_tsk_pinned);
			}
			/* Add this first CPU-pinned task breakpoint. */
			bp_slots_histogram_add(&get_bp_info(bp->cpu, type)->tsk_pinned,
					       next_tsk_pinned, weight);
			/* Rebalance global task pinned histogram. */
			bp_slots_histogram_add(&tsk_pinned_all[type], next_tsk_pinned,
					       -next_tsk_pinned);
		} else { /* Case 2.b: slow path */
			/* Remove this last CPU-pinned task breakpoint. */
			bp_slots_histogram_add(&get_bp_info(bp->cpu, type)->tsk_pinned,
					       next_tsk_pinned + hw_breakpoint_weight(bp), weight);
			/* Remove all from per-CPU histograms. */
			for_each_possible_cpu(cpu) {
				bp_slots_histogram_add(&get_bp_info(cpu, type)->tsk_pinned,
						       next_tsk_pinned, -next_tsk_pinned);
			}
			/* Rebalance global task pinned histogram. */
			bp_slots_histogram_add(&tsk_pinned_all[type], 0, next_tsk_pinned);
		}
	} else { /* Case 3: slow path */
		const struct cpumask *cpumask = cpumask_of_bp(bp);

		for_each_cpu(cpu, cpumask) {
			next_tsk_pinned = task_bp_pinned(cpu, bp, type);
			if (!enable)
				next_tsk_pinned += hw_breakpoint_weight(bp);
			bp_slots_histogram_add(&get_bp_info(cpu, type)->tsk_pinned,
					       next_tsk_pinned, weight);
		}
	}

	/*
	 * Readers want a stable snapshot of the per-task breakpoint list.
	 */
	assert_bp_constraints_lock_held(bp);

	if (enable)
		return rhltable_insert(&task_bps_ht, &bp->hw.bp_list, task_bps_ht_params);

	return 0;
}

__weak int arch_reserve_bp_slot(struct perf_event *bp)
{
	return 0;
}

__weak void arch_release_bp_slot(struct perf_event *bp)
{
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter.
 *
 * Note: Flexible breakpoints are currently unimplemented, but outlined in the
 * below algorithm for completeness. The implementation treats flexible as
 * pinned due to no guarantee that we currently always schedule flexible events
 * before a pinned event on the same CPU.
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
 *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per-task
 *          breakpoints (for this cpu) plus the number of per-cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
 *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per-cpu
 *          bp for every cpu and we keep the max one. Same for the per-task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
 *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the info->flexible, if any, must keep
 *          one register at least (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
 *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
 */

static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
{
	enum bp_type_idx type;
	int max_pinned_slots;
	int weight;
	int ret;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp_type == HW_BREAKPOINT_EMPTY ||
	    bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);

	/* Check if this new breakpoint can be satisfied across all CPUs. */
	max_pinned_slots = max_bp_pinned_slots(bp, type) + weight;
	if (max_pinned_slots > hw_breakpoint_slots_cached(type))
		return -ENOSPC;

	ret = arch_reserve_bp_slot(bp);
	if (ret)
		return ret;

	return toggle_bp_slot(bp, true, type, weight);
}

int reserve_bp_slot(struct perf_event *bp)
{
	struct mutex *mtx = bp_constraints_lock(bp);
	int ret = __reserve_bp_slot(bp, bp->attr.bp_type);

	bp_constraints_unlock(mtx);
	return ret;
}

static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
{
	enum bp_type_idx type;
	int weight;

	arch_release_bp_slot(bp);

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);
	WARN_ON(toggle_bp_slot(bp, false, type, weight));
}

void release_bp_slot(struct perf_event *bp)
{
	struct mutex *mtx = bp_constraints_lock(bp);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp, bp->attr.bp_type);
	bp_constraints_unlock(mtx);
}

static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	int err;

	__release_bp_slot(bp, old_type);
	err = __reserve_bp_slot(bp, new_type);
	if (err) {
		/*
		 * Reserve the old_type slot back in case
		 * there's no space for the new type.
		 *
		 * This must succeed, because we just released
		 * the old_type slot in the __release_bp_slot
		 * call above. If not, something is broken.
		 */
		WARN_ON(__reserve_bp_slot(bp, old_type));
	}

	return err;
}

static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	struct mutex *mtx = bp_constraints_lock(bp);
	int ret = __modify_bp_slot(bp, old_type, new_type);

	bp_constraints_unlock(mtx);
	return ret;
}

/*
 * Allow the kernel debugger to reserve and release breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and release
 * functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	if (bp_constraints_is_locked(bp))
		return -1;

	/* Locks aren't held; disable lockdep assert checking. */
	lockdep_off();
	ret = __reserve_bp_slot(bp, bp->attr.bp_type);
	lockdep_on();

	return ret;
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (bp_constraints_is_locked(bp))
		return -1;

	/* Locks aren't held; disable lockdep assert checking. */
	lockdep_off();
	__release_bp_slot(bp, bp->attr.bp_type);
	lockdep_on();

	return 0;
}

static int hw_breakpoint_parse(struct perf_event *bp,
			       const struct perf_event_attr *attr,
			       struct arch_hw_breakpoint *hw)
{
	int err;

	err = hw_breakpoint_arch_parse(bp, attr, hw);
	if (err)
		return err;

	if (arch_check_bp_in_kernelspace(hw)) {
		if (attr->exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = reserve_bp_slot(bp);
	if (err)
		return err;

	err = hw_breakpoint_parse(bp, &bp->attr, &hw);
	if (err) {
		release_bp_slot(bp);
		return err;
	}

	bp->hw.info = hw;

	return 0;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data that could be used in the triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
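
/*
 * Usage sketch (illustrative only, not part of the upstream file): roughly how
 * a caller such as ptrace would attach a 4-byte write watchpoint to a task.
 * The handler, user_addr and tsk names below are hypothetical.
 *
 *	static void sample_hbp_handler(struct perf_event *bp,
 *				       struct perf_sample_data *data,
 *				       struct pt_regs *regs)
 *	{
 *		pr_info("watchpoint at 0x%llx hit\n", bp->attr.bp_addr);
 *	}
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = user_addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	bp = register_user_hw_breakpoint(&attr, sample_hbp_handler, NULL, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */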

static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
				    struct perf_event_attr *from)
{
	to->bp_addr = from->bp_addr;
	to->bp_type = from->bp_type;
	to->bp_len = from->bp_len;
	to->disabled = from->disabled;
}

int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
				bool check)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = hw_breakpoint_parse(bp, attr, &hw);
	if (err)
		return err;

	if (check) {
		struct perf_event_attr old_attr;

		old_attr = bp->attr;
		hw_breakpoint_copy_attr(&old_attr, attr);
		if (memcmp(&old_attr, attr, sizeof(*attr)))
			return -EINVAL;
	}

	if (bp->attr.bp_type != attr->bp_type) {
		err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
		if (err)
			return err;
	}

	hw_breakpoint_copy_attr(&bp->attr, attr);
	bp->hw.info = hw;

	return 0;
}

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	int err;

	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		perf_event_disable_local(bp);
	else
		perf_event_disable(bp);

	err = modify_user_hw_breakpoint_check(bp, attr, false);

	if (!bp->attr.disabled)
		perf_event_enable(bp);

	return err;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data that could be used in the triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, *bp;
	long err = 0;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			break;
		}

		per_cpu(*cpu_events, cpu) = bp;
	}
	cpus_read_unlock();

	if (likely(!err))
		return cpu_events;

	unregister_wide_hw_breakpoint(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
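
/*
 * Usage sketch (illustrative only, not part of the upstream file): a kernel
 * module can watch a kernel variable on every online CPU. The handler name
 * wide_bp_handler and the variable watched_kernel_variable are hypothetical.
 *
 *	static struct perf_event * __percpu *wide_bp;
 *
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)&watched_kernel_variable;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *	wide_bp = register_wide_hw_breakpoint(&attr, wide_bp_handler, NULL);
 *	if (IS_ERR((void __force *)wide_bp))
 *		return PTR_ERR((void __force *)wide_bp);
 *	...
 *	unregister_wide_hw_breakpoint(wide_bp);
 */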

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;

	for_each_possible_cpu(cpu)
		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

/**
 * hw_breakpoint_is_used - check if breakpoints are currently used
 *
 * Returns: true if breakpoints are used, false otherwise.
 */
bool hw_breakpoint_is_used(void)
{
	int cpu;

	if (!constraints_initialized)
		return false;

	for_each_possible_cpu(cpu) {
		for (int type = 0; type < TYPE_MAX; ++type) {
			struct bp_cpuinfo *info = get_bp_info(cpu, type);

			if (info->cpu_pinned)
				return true;

			for (int slot = 0; slot < hw_breakpoint_slots_cached(type); ++slot) {
				if (atomic_read(&info->tsk_pinned.count[slot]))
					return true;
			}
		}
	}

	for (int type = 0; type < TYPE_MAX; ++type) {
		for (int slot = 0; slot < hw_breakpoint_slots_cached(type); ++slot) {
			/*
			 * Warn, because if there are CPU-pinned counters, we
			 * should never get here; bp_cpuinfo::cpu_pinned should
			 * be consistent with the global cpu_pinned histogram.
			 */
			if (WARN_ON(atomic_read(&cpu_pinned[type].count[slot])))
				return true;

			if (atomic_read(&tsk_pinned_all[type].count[slot]))
				return true;
		}
	}

	return false;
}

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	if (is_sampling_event(bp)) {
		bp->hw.last_period = bp->hw.sample_period;
		perf_swevent_set_period(bp);
	}

	return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}

static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
};

int __init init_hw_breakpoint(void)
{
	int ret;

	ret = rhltable_init(&task_bps_ht, &task_bps_ht_params);
	if (ret)
		return ret;

	ret = init_breakpoint_slots();
	if (ret)
		return ret;

	constraints_initialized = true;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}