hw_breakpoint.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <[email protected]>
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/init.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
#include <asm/hvcall.h>
#include <asm/inst.h>
#include <linux/uaccess.h>
/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return nr_wp_slots();
	return 0;		/* no instruction breakpoints available */
}
static bool single_step_pending(void)
{
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (current->thread.last_hit_ubp[i])
			return true;
	}
	return false;
}
/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return -EBUSY;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (!single_step_pending())
		__set_breakpoint(i, info);

	return 0;
}
/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return;

	__set_breakpoint(i, &null_brk);
}
static bool is_ptrace_bp(struct perf_event *bp)
{
	return bp->overflow_handler == ptrace_triggered;
}

struct breakpoint {
	struct list_head list;
	struct perf_event *bp;
	bool ptrace_bp;
};

/*
 * While kernel/events/hw_breakpoint.c does its own synchronization, we cannot
 * rely on it safely synchronizing internals here; however, we can rely on it
 * not requesting more breakpoints than available.
 */
static DEFINE_SPINLOCK(cpu_bps_lock);
static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
static DEFINE_SPINLOCK(task_bps_lock);
static LIST_HEAD(task_bps);
static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ERR_PTR(-ENOMEM);
	tmp->bp = bp;
	tmp->ptrace_bp = is_ptrace_bp(bp);
	return tmp;
}

static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
{
	__u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;

	bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
	bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);

	return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
}
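
/*
 * Illustrative note (assumes HW_BREAKPOINT_SIZE == 8, i.e. doubleword
 * alignment as described later in this file): a breakpoint at 0x1000 with
 * bp_len = 8 rounds to [0x1000, 0x1008) and one at 0x1006 with bp_len = 4
 * rounds to [0x1000, 0x1010); the half-open ranges intersect, so
 * bp_addr_range_overlap() returns true for that pair.
 */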
static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
{
	return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
}

static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
{
	return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
}
static int task_bps_add(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	spin_lock(&task_bps_lock);
	list_add(&tmp->list, &task_bps);
	spin_unlock(&task_bps_lock);
	return 0;
}

static void task_bps_remove(struct perf_event *bp)
{
	struct list_head *pos, *q;

	spin_lock(&task_bps_lock);
	list_for_each_safe(pos, q, &task_bps) {
		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);

		if (tmp->bp == bp) {
			list_del(&tmp->list);
			kfree(tmp);
			break;
		}
	}
	spin_unlock(&task_bps_lock);
}
/*
 * If any task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool all_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;
	bool ret = false;

	spin_lock(&task_bps_lock);
	list_for_each_entry(tmp, &task_bps, list) {
		if (!can_co_exist(tmp, bp)) {
			ret = true;
			break;
		}
	}
	spin_unlock(&task_bps_lock);
	return ret;
}

/*
 * If the same task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool same_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;
	bool ret = false;

	spin_lock(&task_bps_lock);
	list_for_each_entry(tmp, &task_bps, list) {
		if (tmp->bp->hw.target == bp->hw.target &&
		    !can_co_exist(tmp, bp)) {
			ret = true;
			break;
		}
	}
	spin_unlock(&task_bps_lock);
	return ret;
}
static int cpu_bps_add(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	struct breakpoint *tmp;
	int i = 0;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	spin_lock(&cpu_bps_lock);
	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i]) {
			cpu_bp[i] = tmp;
			break;
		}
	}
	spin_unlock(&cpu_bps_lock);
	return 0;
}

static void cpu_bps_remove(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	int i = 0;

	spin_lock(&cpu_bps_lock);
	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i])
			continue;

		if (cpu_bp[i]->bp == bp) {
			kfree(cpu_bp[i]);
			cpu_bp[i] = NULL;
			break;
		}
	}
	spin_unlock(&cpu_bps_lock);
}

static bool cpu_bps_check(int cpu, struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	bool ret = false;
	int i;

	spin_lock(&cpu_bps_lock);
	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp)) {
			ret = true;
			break;
		}
	}
	spin_unlock(&cpu_bps_lock);
	return ret;
}

static bool all_cpu_bps_check(struct perf_event *bp)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu_bps_check(cpu, bp))
			return true;
	}
	return false;
}
int arch_reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	/* ptrace breakpoint */
	if (is_ptrace_bp(bp)) {
		if (all_cpu_bps_check(bp))
			return -ENOSPC;

		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	}

	/* perf breakpoint */
	if (is_kernel_addr(bp->attr.bp_addr))
		return 0;

	if (bp->hw.target && bp->cpu == -1) {
		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	} else if (!bp->hw.target && bp->cpu != -1) {
		if (all_task_bps_check(bp))
			return -ENOSPC;

		return cpu_bps_add(bp);
	}

	if (same_task_bps_check(bp))
		return -ENOSPC;

	ret = cpu_bps_add(bp);
	if (ret)
		return ret;
	ret = task_bps_add(bp);
	if (ret)
		cpu_bps_remove(bp);

	return ret;
}
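
/*
 * Summary of the reservation policy above (derived from the code): ptrace
 * breakpoints may not overlap any existing per-CPU or same-task perf
 * breakpoint; perf breakpoints on kernel addresses need no bookkeeping;
 * per-task perf breakpoints are tracked on task_bps, per-CPU ones on
 * cpu_bps, and a breakpoint pinned to both a task and a CPU is tracked
 * on both lists.
 */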
void arch_release_bp_slot(struct perf_event *bp)
{
	if (!is_kernel_addr(bp->attr.bp_addr)) {
		if (bp->hw.target)
			task_bps_remove(bp);
		if (bp->cpu != -1)
			cpu_bps_remove(bp);
	}
}
/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
	 * restoration variables to prevent dangling pointers.
	 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
	 */
	if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
		int i;

		for (i = 0; i < nr_wp_slots(); i++) {
			if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
				bp->ctx->task->thread.last_hit_ubp[i] = NULL;
		}
	}
}
/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	return is_kernel_addr(hw->address);
}

int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	*gen_bp_type = 0;
	if (type & HW_BRK_TYPE_READ)
		*gen_bp_type |= HW_BREAKPOINT_R;
	if (type & HW_BRK_TYPE_WRITE)
		*gen_bp_type |= HW_BREAKPOINT_W;
	if (*gen_bp_type == 0)
		return -EINVAL;
	return 0;
}
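
/*
 * For example, type = HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE maps to
 * *gen_bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W (HW_BREAKPOINT_RW),
 * while a type with neither bit set is rejected with -EINVAL.
 */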
/*
 * Watchpoint match range is always doubleword (8 bytes) aligned on
 * powerpc. If the given range crosses a doubleword boundary, we
 * need to increase the length so that the next doubleword also gets
 * covered. Ex,
 *
 * address   len = 6 bytes
 *      |=========.
 * |------------v--|------v--------|
 * | | | | | | | | | | | | | | | | |
 * |---------------|---------------|
 *          <---8 bytes--->
 *
 * In this case, we should configure hw as:
 * start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
 * len = 16 bytes
 *
 * @start_addr is inclusive but @end_addr is exclusive.
 */
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
	u16 max_len = DABR_MAX_LEN;
	u16 hw_len;
	unsigned long start_addr, end_addr;

	start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
	end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
	hw_len = end_addr - start_addr;

	if (dawr_enabled()) {
		max_len = DAWR_MAX_LEN;
		/* DAWR region can't cross 512 bytes boundary on Power10 predecessors */
		if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
		    (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)))
			return -EINVAL;
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		/* 8xx can setup a range without limitation */
		max_len = U16_MAX;
	}

	if (hw_len > max_len)
		return -EINVAL;

	hw->hw_len = hw_len;
	return 0;
}
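
/*
 * Worked example of the calculation above (illustrative, assuming
 * HW_BREAKPOINT_SIZE == 8 as per the doubleword-alignment comment):
 * hw->address = 0x100e, hw->len = 6 covers bytes 0x100e..0x1013, which
 * crosses a doubleword boundary, so start_addr = 0x1008,
 * end_addr = 0x1018 and hw->hw_len = 16, matching both doublewords.
 */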
/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret = -EINVAL;

	if (!bp || !attr->bp_len)
		return ret;

	hw->type = HW_BRK_TYPE_TRANSLATE;
	if (attr->bp_type & HW_BREAKPOINT_R)
		hw->type |= HW_BRK_TYPE_READ;
	if (attr->bp_type & HW_BREAKPOINT_W)
		hw->type |= HW_BRK_TYPE_WRITE;
	if (hw->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
	if (!attr->exclude_user)
		hw->type |= HW_BRK_TYPE_USER;
	if (!attr->exclude_kernel)
		hw->type |= HW_BRK_TYPE_KERNEL;
	if (!attr->exclude_hv)
		hw->type |= HW_BRK_TYPE_HYP;
	hw->address = attr->bp_addr;
	hw->len = attr->bp_len;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	return hw_breakpoint_validate_len(hw);
}
/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;
	int i;

	preempt_disable();

	for (i = 0; i < nr_wp_slots(); i++) {
		if (unlikely(tsk->thread.last_hit_ubp[i]))
			goto reset;
	}
	goto out;

reset:
	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
	for (i = 0; i < nr_wp_slots(); i++) {
		info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
		__set_breakpoint(i, info);
		tsk->thread.last_hit_ubp[i] = NULL;
	}

out:
	preempt_enable();
}
static bool is_larx_stcx_instr(int type)
{
	return type == LARX || type == STCX;
}

static bool is_octword_vsx_instr(int type, int size)
{
	return ((type == LOAD_VSX || type == STORE_VSX) && size == 32);
}
/*
 * We've failed in reliably handling the hw-breakpoint. Unregister
 * it and throw a warning message to let the user know about it.
 */
static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
	     info->address);
	perf_event_disable_inatomic(bp);
}

static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
			   info->address);
	perf_event_disable_inatomic(bp);
}
static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
			     struct arch_hw_breakpoint **info, int *hit,
			     ppc_inst_t instr)
{
	int i;
	int stepped;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			current->thread.last_hit_ubp[i] = bp[i];
			info[i] = NULL;
		}
		regs_set_return_msr(regs, regs->msr | MSR_SE);
		return false;
	}

	stepped = emulate_step(regs, instr);
	if (!stepped) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			handler_error(bp[i], info[i]);
			info[i] = NULL;
		}
		return false;
	}
	return true;
}
static void handle_p10dd1_spurious_exception(struct arch_hw_breakpoint **info,
					     int *hit, unsigned long ea)
{
	int i;
	unsigned long hw_end_addr;

	/*
	 * Handle spurious exception only when any bp_per_reg is set.
	 * Otherwise this might be created by xmon and not actually a
	 * spurious exception.
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!info[i])
			continue;

		hw_end_addr = ALIGN(info[i]->address + info[i]->len, HW_BREAKPOINT_SIZE);

		/*
		 * Ending address of DAWR range is less than starting
		 * address of op.
		 */
		if ((hw_end_addr - 1) >= ea)
			continue;

		/*
		 * Those addresses need to be in the same or in two
		 * consecutive 512B blocks.
		 */
		if (((hw_end_addr - 1) >> 10) != (ea >> 10))
			continue;

		/*
		 * 'op address + 64B' generates an address that has a
		 * carry into bit 52 (crosses 2K boundary).
		 */
		if ((ea & 0x800) == ((ea + 64) & 0x800))
			continue;

		break;
	}

	if (i == nr_wp_slots())
		return;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (info[i]) {
			hit[i] = 1;
			info[i]->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		}
	}
}
/*
 * Handle a DABR or DAWR exception.
 *
 * Called in atomic context.
 */
int hw_breakpoint_handler(struct die_args *args)
{
	bool err = false;
	int rc = NOTIFY_STOP;
	struct perf_event *bp[HBP_NUM_MAX] = { NULL };
	struct pt_regs *regs = args->regs;
	struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL };
	int i;
	int hit[HBP_NUM_MAX] = {0};
	int nr_hit = 0;
	bool ptrace_bp = false;
	ppc_inst_t instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	unsigned long ea;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	if (!IS_ENABLED(CONFIG_PPC_8xx))
		wp_get_instr_detail(regs, &instr, &type, &size, &ea);

	for (i = 0; i < nr_wp_slots(); i++) {
		bp[i] = __this_cpu_read(bp_per_reg[i]);
		if (!bp[i])
			continue;

		info[i] = counter_arch_bp(bp[i]);
		info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;

		if (wp_check_constraints(regs, instr, ea, type, size, info[i])) {
			if (!IS_ENABLED(CONFIG_PPC_8xx) &&
			    ppc_inst_equal(instr, ppc_inst(0))) {
				handler_error(bp[i], info[i]);
				info[i] = NULL;
				err = 1;
				continue;
			}

			if (is_ptrace_bp(bp[i]))
				ptrace_bp = true;
			hit[i] = 1;
			nr_hit++;
		}
	}

	if (err)
		goto reset;

	if (!nr_hit) {
		/* Workaround for Power10 DD1 */
		if (!IS_ENABLED(CONFIG_PPC_8xx) && mfspr(SPRN_PVR) == 0x800100 &&
		    is_octword_vsx_instr(type, size)) {
			handle_p10dd1_spurious_exception(info, hit, ea);
		} else {
			rc = NOTIFY_DONE;
			goto out;
		}
	}

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
	 */
	if (ptrace_bp) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			perf_bp_event(bp[i], regs);
			info[i] = NULL;
		}
		rc = NOTIFY_DONE;
		goto reset;
	}

	if (!IS_ENABLED(CONFIG_PPC_8xx)) {
		if (is_larx_stcx_instr(type)) {
			for (i = 0; i < nr_wp_slots(); i++) {
				if (!hit[i])
					continue;
				larx_stcx_err(bp[i], info[i]);
				info[i] = NULL;
			}
			goto reset;
		}

		if (!stepping_handler(regs, bp, info, hit, instr))
			goto reset;
	}

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!hit[i])
			continue;
		if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp[i], regs);
	}

reset:
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!info[i])
			continue;
		__set_breakpoint(i, info[i]);
	}

out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);
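
/*
 * Summary of the handler flow above (derived from the code): each armed
 * slot is checked with wp_check_constraints(); ptrace hits are reported
 * one-shot and left disarmed, larx/stcx. hits cannot be emulated and
 * disable the breakpoint, user-space hits are single-stepped via MSR_SE,
 * kernel hits are emulated with emulate_step(), and the surviving
 * breakpoints are re-armed at the 'reset' label after the
 * trigger-after-execute callbacks have run.
 */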
/*
 * Handle single-step exceptions following a DABR hit.
 *
 * Called in atomic context.
 */
static int single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;
	int i;
	bool found = false;

	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		bp = current->thread.last_hit_ubp[i];

		if (!bp)
			continue;

		found = true;
		info = counter_arch_bp(bp);

		/*
		 * We shall invoke the user-defined callback function in the
		 * single stepping handler to conform to 'trigger-after-execute'
		 * semantics
		 */
		if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp, regs);
		current->thread.last_hit_ubp[i] = NULL;
	}

	if (!found)
		return NOTIFY_DONE;

	for (i = 0; i < nr_wp_slots(); i++) {
		bp = __this_cpu_read(bp_per_reg[i]);
		if (!bp)
			continue;

		info = counter_arch_bp(bp);
		__set_breakpoint(i, info);
	}

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);
/*
 * Handle debug exception notifications.
 *
 * Called in atomic context.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}
NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);
/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < nr_wp_slots(); i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}