// SPDX-License-Identifier: GPL-2.0-only
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <[email protected]>
 */

#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/compat.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/uaccess.h>

#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Currently stepping a per-CPU kernel breakpoint. */
static DEFINE_PER_CPU(int, stepping_kernel_bp);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;

int hw_breakpoint_slots(int type)
{
        /*
         * We can be called early, so don't rely on
         * our static variables being initialised.
         */
        switch (type) {
        case TYPE_INST:
                return get_num_brps();
        case TYPE_DATA:
                return get_num_wrps();
        default:
                pr_warn("unknown slot type: %d\n", type);
                return 0;
        }
}
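
/*
 * The AArch64 DBGB{V,C}R<n>_EL1 and DBGW{V,C}R<n>_EL1 debug registers are
 * individual system registers, so they cannot be indexed by a run-time
 * value. The macros below therefore expand one switch case per register,
 * letting read_wb_reg()/write_wb_reg() dispatch on (base + n).
 */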
#define READ_WB_REG_CASE(OFF, N, REG, VAL)      \
        case (OFF + N):                         \
                AARCH64_DBG_READ(N, REG, VAL);  \
                break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)     \
        case (OFF + N):                         \
                AARCH64_DBG_WRITE(N, REG, VAL); \
                break

#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)    \
        READ_WB_REG_CASE(OFF,  0, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  1, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  2, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  3, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  4, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  5, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  6, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  7, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  8, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  9, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 10, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 11, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 12, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 13, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 14, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 15, REG, VAL)

#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)   \
        WRITE_WB_REG_CASE(OFF,  0, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  1, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  2, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  3, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  4, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  5, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  6, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  7, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  8, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  9, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 10, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 11, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 12, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 13, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 14, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 15, REG, VAL)

static u64 read_wb_reg(int reg, int n)
{
        u64 val = 0;

        switch (reg + n) {
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
        default:
                pr_warn("attempt to read from unknown breakpoint register %d\n", n);
        }

        return val;
}
NOKPROBE_SYMBOL(read_wb_reg);

static void write_wb_reg(int reg, int n, u64 val)
{
        switch (reg + n) {
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
        default:
                pr_warn("attempt to write to unknown breakpoint register %d\n", n);
        }
        isb();
}
NOKPROBE_SYMBOL(write_wb_reg);
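
/*
 * Example (illustrative only): programming breakpoint slot 0 with an
 * address and an encoded control word would look like:
 *
 *      write_wb_reg(AARCH64_DBG_REG_BVR, 0, info->address);
 *      write_wb_reg(AARCH64_DBG_REG_BCR, 0, encode_ctrl_reg(info->ctrl));
 *
 * which is exactly the pattern hw_breakpoint_control() uses below. The
 * isb() in write_wb_reg() ensures the new register value is visible to
 * subsequent instructions.
 */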
/*
 * Convert a breakpoint privilege level to the corresponding exception
 * level.
 */
static enum dbg_active_el debug_exception_level(int privilege)
{
        switch (privilege) {
        case AARCH64_BREAKPOINT_EL0:
                return DBG_ACTIVE_EL0;
        case AARCH64_BREAKPOINT_EL1:
                return DBG_ACTIVE_EL1;
        default:
                pr_warn("invalid breakpoint privilege level %d\n", privilege);
                return -EINVAL;
        }
}
NOKPROBE_SYMBOL(debug_exception_level);

enum hw_breakpoint_ops {
        HW_BREAKPOINT_INSTALL,
        HW_BREAKPOINT_UNINSTALL,
        HW_BREAKPOINT_RESTORE
};

static int is_compat_bp(struct perf_event *bp)
{
        struct task_struct *tsk = bp->hw.target;

        /*
         * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
         * In this case, use the native interface, since we don't have
         * the notion of a "compat CPU" and could end up relying on
         * deprecated behaviour if we use unaligned watchpoints in
         * AArch64 state.
         */
        return tsk && is_compat_thread(task_thread_info(tsk));
}

/**
 * hw_breakpoint_slot_setup - Find and setup a perf slot according to
 *                            operations
 *
 * @slots: pointer to array of slots
 * @max_slots: max number of slots
 * @bp: perf_event to setup
 * @ops: operation to be carried out on the slot
 *
 * Return:
 *      slot index on success
 *      -ENOSPC if no slot is available/matches
 *      -EINVAL on wrong operations parameter
 */
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
                                    struct perf_event *bp,
                                    enum hw_breakpoint_ops ops)
{
        int i;
        struct perf_event **slot;

        for (i = 0; i < max_slots; ++i) {
                slot = &slots[i];
                switch (ops) {
                case HW_BREAKPOINT_INSTALL:
                        if (!*slot) {
                                *slot = bp;
                                return i;
                        }
                        break;
                case HW_BREAKPOINT_UNINSTALL:
                        if (*slot == bp) {
                                *slot = NULL;
                                return i;
                        }
                        break;
                case HW_BREAKPOINT_RESTORE:
                        if (*slot == bp)
                                return i;
                        break;
                default:
                        pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
                        return -EINVAL;
                }
        }
        return -ENOSPC;
}

static int hw_breakpoint_control(struct perf_event *bp,
                                 enum hw_breakpoint_ops ops)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slots;
        struct debug_info *debug_info = &current->thread.debug;
        int i, max_slots, ctrl_reg, val_reg, reg_enable;
        enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
        u32 ctrl;

        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                ctrl_reg = AARCH64_DBG_REG_BCR;
                val_reg = AARCH64_DBG_REG_BVR;
                slots = this_cpu_ptr(bp_on_reg);
                max_slots = core_num_brps;
                reg_enable = !debug_info->bps_disabled;
        } else {
                /* Watchpoint */
                ctrl_reg = AARCH64_DBG_REG_WCR;
                val_reg = AARCH64_DBG_REG_WVR;
                slots = this_cpu_ptr(wp_on_reg);
                max_slots = core_num_wrps;
                reg_enable = !debug_info->wps_disabled;
        }

        i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

        if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
                return i;

        switch (ops) {
        case HW_BREAKPOINT_INSTALL:
                /*
                 * Ensure debug monitors are enabled at the correct exception
                 * level.
                 */
                enable_debug_monitors(dbg_el);
                fallthrough;
        case HW_BREAKPOINT_RESTORE:
                /* Setup the address register. */
                write_wb_reg(val_reg, i, info->address);

                /* Setup the control register. */
                ctrl = encode_ctrl_reg(info->ctrl);
                write_wb_reg(ctrl_reg, i,
                             reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
                break;
        case HW_BREAKPOINT_UNINSTALL:
                /* Reset the control register. */
                write_wb_reg(ctrl_reg, i, 0);

                /*
                 * Release the debug monitors for the correct exception
                 * level.
                 */
                disable_debug_monitors(dbg_el);
                break;
        }

        return 0;
}
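
/*
 * hw_breakpoint_control() is the single entry point for touching the
 * debug registers: INSTALL claims a free slot and programs it (enabling
 * the debug monitors first), RESTORE reprograms an already-claimed slot
 * (used on CPU PM resume), and UNINSTALL clears the control register and
 * releases the monitors. The perf layer drives it through the two
 * wrappers below.
 */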
/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}

static int get_hbp_len(u8 hbp_len)
{
        unsigned int len_in_bytes = 0;

        switch (hbp_len) {
        case ARM_BREAKPOINT_LEN_1:
                len_in_bytes = 1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                len_in_bytes = 2;
                break;
        case ARM_BREAKPOINT_LEN_3:
                len_in_bytes = 3;
                break;
        case ARM_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
        case ARM_BREAKPOINT_LEN_5:
                len_in_bytes = 5;
                break;
        case ARM_BREAKPOINT_LEN_6:
                len_in_bytes = 6;
                break;
        case ARM_BREAKPOINT_LEN_7:
                len_in_bytes = 7;
                break;
        case ARM_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
        }

        return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
        unsigned int len;
        unsigned long va;

        va = hw->address;
        len = get_hbp_len(hw->ctrl.len);

        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
                           int *gen_len, int *gen_type, int *offset)
{
        /* Type */
        switch (ctrl.type) {
        case ARM_BREAKPOINT_EXECUTE:
                *gen_type = HW_BREAKPOINT_X;
                break;
        case ARM_BREAKPOINT_LOAD:
                *gen_type = HW_BREAKPOINT_R;
                break;
        case ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_W;
                break;
        case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_RW;
                break;
        default:
                return -EINVAL;
        }

        if (!ctrl.len)
                return -EINVAL;

        *offset = __ffs(ctrl.len);

        /* Len */
        switch (ctrl.len >> *offset) {
        case ARM_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                *gen_len = HW_BREAKPOINT_LEN_2;
                break;
        case ARM_BREAKPOINT_LEN_3:
                *gen_len = HW_BREAKPOINT_LEN_3;
                break;
        case ARM_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
        case ARM_BREAKPOINT_LEN_5:
                *gen_len = HW_BREAKPOINT_LEN_5;
                break;
        case ARM_BREAKPOINT_LEN_6:
                *gen_len = HW_BREAKPOINT_LEN_6;
                break;
        case ARM_BREAKPOINT_LEN_7:
                *gen_len = HW_BREAKPOINT_LEN_7;
                break;
        case ARM_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
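
/*
 * Worked example (illustrative): a ctrl.len of 0b1100 selects bytes 2-3
 * of the watched word, so __ffs() gives *offset = 2 and the remaining
 * value (0b1100 >> 2 == 0b11 == ARM_BREAKPOINT_LEN_2) maps to
 * HW_BREAKPOINT_LEN_2, i.e. a two-byte watchpoint at (address + 2).
 */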
/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp,
                              const struct perf_event_attr *attr,
                              struct arch_hw_breakpoint *hw)
{
        /* Type */
        switch (attr->bp_type) {
        case HW_BREAKPOINT_X:
                hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
                hw->ctrl.type = ARM_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
                hw->ctrl.type = ARM_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
                hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (attr->bp_len) {
        case HW_BREAKPOINT_LEN_1:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_3:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_3;
                break;
        case HW_BREAKPOINT_LEN_4:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_5:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_5;
                break;
        case HW_BREAKPOINT_LEN_6:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_6;
                break;
        case HW_BREAKPOINT_LEN_7:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_7;
                break;
        case HW_BREAKPOINT_LEN_8:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        /*
         * On AArch64, we only permit breakpoints of length 4, whereas
         * AArch32 also requires breakpoints of length 2 for Thumb.
         * Watchpoints can be of length 1, 2, 4 or 8 bytes.
         */
        if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                if (is_compat_bp(bp)) {
                        if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
                            hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
                                return -EINVAL;
                } else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) {
                        /*
                         * FIXME: Some tools (I'm looking at you perf) assume
                         * that breakpoints should be sizeof(long). This
                         * is nonsense. For now, we fix up the parameter
                         * but we should probably return -EINVAL instead.
                         */
                        hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
                }
        }

        /* Address */
        hw->address = attr->bp_addr;

        /*
         * Privilege
         * Note that we disallow combined EL0/EL1 breakpoints because
         * that would complicate the stepping code.
         */
        if (arch_check_bp_in_kernelspace(hw))
                hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
        else
                hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

        /* Enabled? */
        hw->ctrl.enabled = !attr->disabled;

        return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
                             const struct perf_event_attr *attr,
                             struct arch_hw_breakpoint *hw)
{
        int ret;
        u64 alignment_mask, offset;

        /* Build the arch_hw_breakpoint. */
        ret = arch_build_bp_info(bp, attr, hw);
        if (ret)
                return ret;

        /*
         * Check address alignment.
         * We don't do any clever alignment correction for watchpoints
         * because using 64-bit unaligned addresses is deprecated for
         * AArch64.
         *
         * AArch32 tasks expect some simple alignment fixups, so emulate
         * that here.
         */
        if (is_compat_bp(bp)) {
                if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
                        alignment_mask = 0x7;
                else
                        alignment_mask = 0x3;
                offset = hw->address & alignment_mask;
                switch (offset) {
                case 0:
                        /* Aligned */
                        break;
                case 1:
                case 2:
                        /* Allow halfword watchpoints and breakpoints. */
                        if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
                                break;

                        fallthrough;
                case 3:
                        /* Allow single byte watchpoint. */
                        if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
                                break;

                        fallthrough;
                default:
                        return -EINVAL;
                }
        } else {
                if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE)
                        alignment_mask = 0x3;
                else
                        alignment_mask = 0x7;
                offset = hw->address & alignment_mask;
        }

        hw->address &= ~alignment_mask;
        hw->ctrl.len <<= offset;

        /*
         * Disallow per-task kernel breakpoints since these would
         * complicate the stepping code.
         */
        if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
                return -EINVAL;

        return 0;
}
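
/*
 * Worked example (illustrative): a compat task asking for a 1-byte
 * watchpoint at 0x1003 gives offset == 3, which "case 3" above permits.
 * The address is then rounded down to 0x1000 and ctrl.len becomes
 * ARM_BREAKPOINT_LEN_1 << 3, i.e. a byte-address-select mask picking
 * byte 3 of the watched word.
 */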
/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 */
static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
        int i, max_slots, privilege;
        u32 ctrl;
        struct perf_event **slots;

        switch (reg) {
        case AARCH64_DBG_REG_BCR:
                slots = this_cpu_ptr(bp_on_reg);
                max_slots = core_num_brps;
                break;
        case AARCH64_DBG_REG_WCR:
                slots = this_cpu_ptr(wp_on_reg);
                max_slots = core_num_wrps;
                break;
        default:
                return;
        }

        for (i = 0; i < max_slots; ++i) {
                if (!slots[i])
                        continue;

                privilege = counter_arch_bp(slots[i])->ctrl.privilege;
                if (debug_exception_level(privilege) != el)
                        continue;

                ctrl = read_wb_reg(reg, i);
                if (enable)
                        ctrl |= 0x1;
                else
                        ctrl &= ~0x1;
                write_wb_reg(reg, i, ctrl);
        }
}
NOKPROBE_SYMBOL(toggle_bp_registers);

/*
 * Debug exception handlers.
 */
static int breakpoint_handler(unsigned long unused, unsigned long esr,
                              struct pt_regs *regs)
{
        int i, step = 0, *kernel_step;
        u32 ctrl_reg;
        u64 addr, val;
        struct perf_event *bp, **slots;
        struct debug_info *debug_info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(bp_on_reg);
        addr = instruction_pointer(regs);
        debug_info = &current->thread.debug;

        for (i = 0; i < core_num_brps; ++i) {
                rcu_read_lock();

                bp = slots[i];

                if (bp == NULL)
                        goto unlock;

                /* Check if the breakpoint value matches. */
                val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
                if (val != (addr & ~0x3))
                        goto unlock;

                /* Possible match, check the byte address select to confirm. */
                ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                if (!((1 << (addr & 0x3)) & ctrl.len))
                        goto unlock;

                counter_arch_bp(bp)->trigger = addr;
                perf_bp_event(bp, regs);

                /* Do we need to handle the stepping? */
                if (uses_default_overflow_handler(bp))
                        step = 1;
unlock:
                rcu_read_unlock();
        }

        if (!step)
                return 0;

        if (user_mode(regs)) {
                debug_info->bps_disabled = 1;
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

                /* If we're already stepping a watchpoint, just return. */
                if (debug_info->wps_disabled)
                        return 0;

                if (test_thread_flag(TIF_SINGLESTEP))
                        debug_info->suspended_step = 1;
                else
                        user_enable_single_step(current);
        } else {
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
                kernel_step = this_cpu_ptr(&stepping_kernel_bp);

                if (*kernel_step != ARM_KERNEL_STEP_NONE)
                        return 0;

                if (kernel_active_single_step()) {
                        *kernel_step = ARM_KERNEL_STEP_SUSPEND;
                } else {
                        *kernel_step = ARM_KERNEL_STEP_ACTIVE;
                        kernel_enable_single_step(regs);
                }
        }

        return 0;
}
NOKPROBE_SYMBOL(breakpoint_handler);
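
/*
 * The handlers above and below share a stepping scheme: when an event
 * with the default overflow handler fires, the matching registers are
 * disabled at the triggering exception level, the CPU single-steps over
 * the offending instruction, and reinstall_suspended_bps() re-arms
 * everything from the step handler. Without this, returning from the
 * debug exception would immediately re-trigger it.
 */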
/*
 * Arm64 hardware does not always report a watchpoint hit address that matches
 * one of the watchpoints set. It can also report an address "near" the
 * watchpoint if a single instruction accesses both watched and unwatched
 * addresses. There is no straightforward way, short of disassembling the
 * offending instruction, to map that address back to the watchpoint. This
 * function computes the distance of the memory access from the watchpoint as a
 * heuristic for the likelihood that a given access triggered the watchpoint.
 *
 * See Section D2.10.5 "Determining the memory location that caused a Watchpoint
 * exception" of the ARMv8 Architecture Reference Manual for details.
 *
 * The function returns the distance of the address from the bytes watched by
 * the watchpoint. In case of an exact match, it returns 0.
 */
static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
                                        struct arch_hw_breakpoint_ctrl *ctrl)
{
        u64 wp_low, wp_high;
        u32 lens, lene;

        addr = untagged_addr(addr);

        lens = __ffs(ctrl->len);
        lene = __fls(ctrl->len);

        wp_low = val + lens;
        wp_high = val + lene;
        if (addr < wp_low)
                return wp_low - addr;
        else if (addr > wp_high)
                return addr - wp_high;
        else
                return 0;
}
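
/*
 * Worked example (illustrative): a watchpoint with val == 0x1000 and
 * ctrl->len == 0b00001111 watches bytes 0x1000-0x1003 (wp_low == 0x1000,
 * wp_high == 0x1003). A reported address of 0x1008 yields a distance of
 * 5, while 0x1002 falls inside the watched range and yields 0.
 */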
static int watchpoint_report(struct perf_event *wp, unsigned long addr,
                             struct pt_regs *regs)
{
        int step = uses_default_overflow_handler(wp);
        struct arch_hw_breakpoint *info = counter_arch_bp(wp);

        info->trigger = addr;

        /*
         * If we triggered a user watchpoint from a uaccess routine, then
         * handle the stepping ourselves since userspace really can't help
         * us with this.
         */
        if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0)
                step = 1;
        else
                perf_bp_event(wp, regs);

        return step;
}

static int watchpoint_handler(unsigned long addr, unsigned long esr,
                              struct pt_regs *regs)
{
        int i, step = 0, *kernel_step, access, closest_match = 0;
        u64 min_dist = -1, dist;
        u32 ctrl_reg;
        u64 val;
        struct perf_event *wp, **slots;
        struct debug_info *debug_info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(wp_on_reg);
        debug_info = &current->thread.debug;

        /*
         * Find all watchpoints that match the reported address. If no exact
         * match is found, attribute the hit to the closest watchpoint.
         */
        rcu_read_lock();
        for (i = 0; i < core_num_wrps; ++i) {
                wp = slots[i];
                if (wp == NULL)
                        continue;

                /*
                 * Check that the access type matches.
                 * 0 => load, otherwise => store
                 */
                access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
                         HW_BREAKPOINT_R;
                if (!(access & hw_breakpoint_type(wp)))
                        continue;

                /* Check if the watchpoint value and byte select match. */
                val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
                ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                dist = get_distance_from_watchpoint(addr, val, &ctrl);
                if (dist < min_dist) {
                        min_dist = dist;
                        closest_match = i;
                }
                /* Is this an exact match? */
                if (dist != 0)
                        continue;

                step = watchpoint_report(wp, addr, regs);
        }

        /* No exact match found? */
        if (min_dist > 0 && min_dist != -1)
                step = watchpoint_report(slots[closest_match], addr, regs);

        rcu_read_unlock();

        if (!step)
                return 0;

        /*
         * We always disable EL0 watchpoints because the kernel can
         * cause these to fire via an unprivileged access.
         */
        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

        if (user_mode(regs)) {
                debug_info->wps_disabled = 1;

                /* If we're already stepping a breakpoint, just return. */
                if (debug_info->bps_disabled)
                        return 0;

                if (test_thread_flag(TIF_SINGLESTEP))
                        debug_info->suspended_step = 1;
                else
                        user_enable_single_step(current);
        } else {
                toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
                kernel_step = this_cpu_ptr(&stepping_kernel_bp);

                if (*kernel_step != ARM_KERNEL_STEP_NONE)
                        return 0;

                if (kernel_active_single_step()) {
                        *kernel_step = ARM_KERNEL_STEP_SUSPEND;
                } else {
                        *kernel_step = ARM_KERNEL_STEP_ACTIVE;
                        kernel_enable_single_step(regs);
                }
        }

        return 0;
}
NOKPROBE_SYMBOL(watchpoint_handler);

/*
 * Handle single-step exception.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
        struct debug_info *debug_info = &current->thread.debug;
        int handled_exception = 0, *kernel_step;

        kernel_step = this_cpu_ptr(&stepping_kernel_bp);

        /*
         * Called from single-step exception handler.
         * Return 0 if execution can resume, 1 if a SIGTRAP should be
         * reported.
         */
        if (user_mode(regs)) {
                if (debug_info->bps_disabled) {
                        debug_info->bps_disabled = 0;
                        toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
                        handled_exception = 1;
                }

                if (debug_info->wps_disabled) {
                        debug_info->wps_disabled = 0;
                        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
                        handled_exception = 1;
                }

                if (handled_exception) {
                        if (debug_info->suspended_step) {
                                debug_info->suspended_step = 0;
                                /* Allow exception handling to fall through. */
                                handled_exception = 0;
                        } else {
                                user_disable_single_step(current);
                        }
                }
        } else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
                toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

                if (!debug_info->wps_disabled)
                        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

                if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
                        kernel_disable_single_step();
                        handled_exception = 1;
                } else {
                        handled_exception = 0;
                }

                *kernel_step = ARM_KERNEL_STEP_NONE;
        }

        return !handled_exception;
}
NOKPROBE_SYMBOL(reinstall_suspended_bps);

/*
 * Context-switcher for restoring suspended breakpoints.
 */
void hw_breakpoint_thread_switch(struct task_struct *next)
{
        /*
         *           current        next
         * disabled: 0              0     => The usual case, NOTIFY_DONE
         *           0              1     => Disable the registers
         *           1              0     => Enable the registers
         *           1              1     => NOTIFY_DONE. per-task bps will
         *                                   get taken care of by perf.
         */

        struct debug_info *current_debug_info, *next_debug_info;

        current_debug_info = &current->thread.debug;
        next_debug_info = &next->thread.debug;

        /* Update breakpoints. */
        if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
                toggle_bp_registers(AARCH64_DBG_REG_BCR,
                                    DBG_ACTIVE_EL0,
                                    !next_debug_info->bps_disabled);
        /* Update watchpoints. */
        if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
                toggle_bp_registers(AARCH64_DBG_REG_WCR,
                                    DBG_ACTIVE_EL0,
                                    !next_debug_info->wps_disabled);
}

/*
 * CPU initialisation.
 */
static int hw_breakpoint_reset(unsigned int cpu)
{
        int i;
        struct perf_event **slots;

        /*
         * When a CPU goes through cold-boot, it does not have any installed
         * slots, so it is safe to share the same function for restoring and
         * resetting breakpoints; when a CPU is hotplugged in, it goes
         * through the slots, which are all empty, hence it just resets control
         * and value for debug registers.
         * When this function is triggered on warm-boot through a CPU PM
         * notifier, some slots might be initialised; if so, they are
         * reprogrammed according to the debug slots' content.
         */
        for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
                if (slots[i]) {
                        hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
                } else {
                        write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
                        write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
                }
        }

        for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
                if (slots[i]) {
                        hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
                } else {
                        write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
                        write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
                }
        }

        return 0;
}

#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int));
#else
static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
{
}
#endif

/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
        int ret;

        core_num_brps = get_num_brps();
        core_num_wrps = get_num_wrps();

        pr_info("found %d breakpoint and %d watchpoint registers.\n",
                core_num_brps, core_num_wrps);

        /* Register debug fault handlers. */
        hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
                              TRAP_HWBKPT, "hw-breakpoint handler");
        hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
                              TRAP_HWBKPT, "hw-watchpoint handler");

        /*
         * Reset the breakpoint resources. We assume that a halting
         * debugger will leave the world in a nice state for us.
         */
        ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
                                "perf/arm64/hw_breakpoint:starting",
                                hw_breakpoint_reset, NULL);
        if (ret)
                pr_err("failed to register CPU hotplug notifier: %d\n", ret);

        /* Register cpu_suspend hw breakpoint restore hook */
        cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

        return ret;
}
arch_initcall(arch_hw_breakpoint_init);
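
/*
 * Userspace usage sketch (not part of this file): the slots managed here
 * back the generic perf breakpoint ABI, so a 4-byte write watchpoint can
 * be requested with something like:
 *
 *      struct perf_event_attr attr = {
 *              .type           = PERF_TYPE_BREAKPOINT,
 *              .size           = sizeof(attr),
 *              .bp_type        = HW_BREAKPOINT_W,
 *              .bp_addr        = (__u64)&watched_var,
 *              .bp_len         = HW_BREAKPOINT_LEN_4,
 *      };
 *      fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * where watched_var and fd are the caller's own placeholders; the request
 * is validated by hw_breakpoint_arch_parse() above.
 */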
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data)
{
        return NOTIFY_DONE;
}