hyp_trace.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Google LLC
 */

#include <linux/arm-smccc.h>
#include <linux/list.h>
#include <linux/percpu-defs.h>
#include <linux/ring_buffer.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>

#include <asm/kvm_host.h>
#include <asm/kvm_hyptrace.h>
#include <asm/kvm_hypevents_defs.h>

#include "hyp_constants.h"
#include "hyp_trace.h"

#define RB_POLL_MS		100

#define TRACEFS_DIR		"hyp"
#define TRACEFS_MODE_WRITE	0640
#define TRACEFS_MODE_READ	0440

static bool hyp_trace_on;
static bool hyp_free_tracing_deferred;
static int hyp_trace_readers;
static LIST_HEAD(hyp_pipe_readers);
static struct trace_buffer *hyp_trace_buffer;
static size_t hyp_trace_buffer_size = 7 << 10;
static struct hyp_buffer_pages_backing hyp_buffer_pages_backing;
static DEFINE_MUTEX(hyp_trace_lock);
static DEFINE_PER_CPU(struct mutex, hyp_trace_reader_lock);

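/*
 * Allocate backing storage for the hypervisor-side buffer_page descriptors
 * (STRUCT_HYP_BUFFER_PAGE_SIZE bytes per ring-buffer page), and record it in
 * the pack so the hypervisor knows where to find it.
 */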
static int bpage_backing_setup(struct hyp_trace_pack *pack)
{
	size_t backing_size;
	void *start;

	if (hyp_buffer_pages_backing.start)
		return -EBUSY;

	backing_size = STRUCT_HYP_BUFFER_PAGE_SIZE *
		       pack->trace_buffer_pack.total_pages;
	backing_size = PAGE_ALIGN(backing_size);

	start = alloc_pages_exact(backing_size, GFP_KERNEL_ACCOUNT);
	if (!start)
		return -ENOMEM;

	hyp_buffer_pages_backing.start = (unsigned long)start;
	hyp_buffer_pages_backing.size = backing_size;
	pack->backing.start = (unsigned long)start;
	pack->backing.size = backing_size;

	return 0;
}

static void bpage_backing_teardown(void)
{
	unsigned long backing = hyp_buffer_pages_backing.start;

	if (!hyp_buffer_pages_backing.start)
		return;

	free_pages_exact((void *)backing, hyp_buffer_pages_backing.size);

	hyp_buffer_pages_backing.start = 0;
	hyp_buffer_pages_backing.size = 0;
}

/*
 * Configure the hyp tracing clock. So far, only one is supported: "boot".
 * This clock doesn't stop during suspend, which makes it a good candidate.
 * The downside is that if this clock is corrected by NTP while tracing, the
 * hyp clock will slightly drift compared to the host version.
 */
static void hyp_clock_setup(struct hyp_trace_pack *pack)
{
	struct kvm_nvhe_clock_data *clock_data = &pack->trace_clock_data;
	struct system_time_snapshot snap;

	ktime_get_snapshot(&snap);

	clock_data->epoch_cyc = snap.cycles;
	clock_data->epoch_ns = snap.boot;
	clock_data->mult = snap.mono_mult;
	clock_data->shift = snap.mono_shift;
}

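/*
 * Callbacks for the host-side ring buffer: the writer lives in the
 * hypervisor, so swapping the reader page and refreshing the page footers are
 * delegated to nVHE hypercalls.
 */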
static int __swap_reader_page(int cpu)
{
	return kvm_call_hyp_nvhe(__pkvm_rb_swap_reader_page, cpu);
}

static int __update_footers(int cpu)
{
	return kvm_call_hyp_nvhe(__pkvm_rb_update_footers, cpu);
}

struct ring_buffer_ext_cb hyp_cb = {
	.update_footers	= __update_footers,
	.swap_reader	= __swap_reader_page,
};

static inline int share_page(unsigned long va)
{
	return kvm_call_hyp_nvhe(__pkvm_host_share_hyp, virt_to_pfn(va), 1);
}

static inline int unshare_page(unsigned long va)
{
	return kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, virt_to_pfn(va), 1);
}

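/*
 * Apply @func (share_page or unshare_page) to the reader page and to every
 * data page of each per-CPU ring buffer described by @trace_pack.
 */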
static int trace_pack_pages_apply(struct trace_buffer_pack *trace_pack,
				  int (*func)(unsigned long))
{
	struct ring_buffer_pack *rb_pack;
	int cpu, i, ret;

	for_each_ring_buffer_pack(rb_pack, cpu, trace_pack) {
		ret = func(rb_pack->reader_page_va);
		if (ret)
			return ret;

		for (i = 0; i < rb_pack->nr_pages; i++) {
			ret = func(rb_pack->page_va[i]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * The size of a hyp_trace_pack depends on the size of its trace_buffer_pack,
 * so trace_buffer_setup() is in charge of allocating the former.
 */
static int trace_buffer_setup(struct hyp_trace_pack **pack, size_t *pack_size)
{
	struct trace_buffer_pack *trace_pack;
	int ret;

	hyp_trace_buffer = ring_buffer_alloc_ext(hyp_trace_buffer_size, &hyp_cb);
	if (!hyp_trace_buffer)
		return -ENOMEM;

	*pack_size = offsetof(struct hyp_trace_pack, trace_buffer_pack) +
		     trace_buffer_pack_size(hyp_trace_buffer);
	/*
	 * The hypervisor will unmap the pack from the host to protect the
	 * reading. Page granularity for the pack allocation ensures no other
	 * useful data will be unmapped.
	 */
	*pack_size = PAGE_ALIGN(*pack_size);
	*pack = alloc_pages_exact(*pack_size, GFP_KERNEL);
	if (!*pack) {
		ret = -ENOMEM;
		goto err;
	}

	trace_pack = &(*pack)->trace_buffer_pack;

	WARN_ON(trace_buffer_pack(hyp_trace_buffer, trace_pack));

	ret = trace_pack_pages_apply(trace_pack, share_page);
	if (ret) {
		trace_pack_pages_apply(trace_pack, unshare_page);
		free_pages_exact(*pack, *pack_size);
		goto err;
	}

	return 0;

err:
	ring_buffer_free(hyp_trace_buffer);
	hyp_trace_buffer = NULL;

	return ret;
}

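/*
 * Unshare the buffer pages from the hypervisor and free the host-side ring
 * buffer. When @trace_pack is NULL, a temporary pack is allocated to recover
 * the list of pages.
 */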
static void trace_buffer_teardown(struct trace_buffer_pack *trace_pack)
{
	bool alloc_trace_pack = !trace_pack;

	if (alloc_trace_pack) {
		trace_pack = kzalloc(trace_buffer_pack_size(hyp_trace_buffer), GFP_KERNEL);
		if (!trace_pack) {
			WARN_ON(1);
			goto end;
		}
	}

	WARN_ON(trace_buffer_pack(hyp_trace_buffer, trace_pack));
	WARN_ON(trace_pack_pages_apply(trace_pack, unshare_page));

	if (alloc_trace_pack)
		kfree(trace_pack);

end:
	ring_buffer_free(hyp_trace_buffer);
	hyp_trace_buffer = NULL;
}

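/*
 * Build the ring buffer, the clock data and the buffer_page backing, then
 * hand the resulting pack over to the hypervisor. On success, only the pack
 * itself is freed: the buffer pages and the backing stay in use by the
 * hypervisor.
 */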
static int hyp_load_tracing(void)
{
	struct hyp_trace_pack *pack;
	size_t pack_size;
	int ret;

	ret = trace_buffer_setup(&pack, &pack_size);
	if (ret)
		return ret;

	hyp_clock_setup(pack);

	ret = bpage_backing_setup(pack);
	if (ret)
		goto end_buffer_teardown;

	ret = kvm_call_hyp_nvhe(__pkvm_load_tracing, (unsigned long)pack, pack_size);
	if (!ret)
		goto end_free_pack;

	bpage_backing_teardown();

end_buffer_teardown:
	trace_buffer_teardown(&pack->trace_buffer_pack);

end_free_pack:
	free_pages_exact(pack, pack_size);

	return ret;
}

static void hyp_free_tracing(void)
{
	WARN_ON(hyp_trace_readers || hyp_trace_on);

	if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_tracing)))
		return;

	trace_buffer_teardown(NULL);
	bpage_backing_teardown();
}

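/*
 * Poke the ring buffer of @cpu, or of every CPU in @cpus when @cpu is
 * RING_BUFFER_ALL_CPUS, to refresh the host view of the hypervisor writes.
 */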
void hyp_poke_tracing(int cpu, const struct cpumask *cpus)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		for_each_cpu(cpu, cpus)
			WARN_ON_ONCE(ring_buffer_poke(hyp_trace_buffer, cpu));
	} else {
		WARN_ON_ONCE(ring_buffer_poke(hyp_trace_buffer, cpu));
	}
}

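/*
 * Enable tracing in the hypervisor, loading the buffers first if necessary,
 * and kick the periodic poke work for every registered pipe reader.
 */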
static int hyp_start_tracing(void)
{
	int ret = 0;

	if (hyp_trace_on)
		return -EBUSY;

	if (!hyp_trace_buffer) {
		ret = hyp_load_tracing();
		if (ret)
			return ret;
	}

	ret = kvm_call_hyp_nvhe(__pkvm_enable_tracing, true);
	if (!ret) {
		struct ht_iterator *iter;

		list_for_each_entry(iter, &hyp_pipe_readers, list)
			schedule_delayed_work(&iter->poke_work,
					      msecs_to_jiffies(RB_POLL_MS));
		hyp_trace_on = true;
	}

	return ret;
}

static void hyp_stop_tracing(void)
{
	struct ht_iterator *iter;
	int ret;

	if (!hyp_trace_buffer || !hyp_trace_on)
		return;

	ret = kvm_call_hyp_nvhe(__pkvm_enable_tracing, false);
	if (ret) {
		WARN_ON(1);
		return;
	}

	hyp_trace_on = false;

	list_for_each_entry(iter, &hyp_pipe_readers, list) {
		cancel_delayed_work_sync(&iter->poke_work);
		hyp_poke_tracing(iter->cpu, iter->cpus);
	}
}

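/* tracefs hyp/tracing_on: writing '1' starts hypervisor tracing, '0' stops it. */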
static ssize_t
hyp_tracing_on(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int err = 0;
	char c;

	if (!cnt || cnt > 2)
		return -EINVAL;

	if (get_user(c, ubuf))
		return -EFAULT;

	mutex_lock(&hyp_trace_lock);

	switch (c) {
	case '1':
		err = hyp_start_tracing();
		break;
	case '0':
		hyp_stop_tracing();
		break;
	default:
		err = -EINVAL;
	}

	mutex_unlock(&hyp_trace_lock);

	return err ? err : cnt;
}

static ssize_t hyp_tracing_on_read(struct file *filp, char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[3];
	int r;

	mutex_lock(&hyp_trace_lock);
	r = sprintf(buf, "%d\n", hyp_trace_on);
	mutex_unlock(&hyp_trace_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations hyp_tracing_on_fops = {
	.write	= hyp_tracing_on,
	.read	= hyp_tracing_on_read,
};

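/*
 * tracefs hyp/buffer_size_kb: per-CPU buffer size, in KiB, taken into account
 * the next time the buffers are loaded into the hypervisor.
 */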
static ssize_t hyp_buffer_size(struct file *filp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (!val)
		return -EINVAL;

	mutex_lock(&hyp_trace_lock);
	hyp_trace_buffer_size = val << 10; /* KB to B */
	mutex_unlock(&hyp_trace_lock);

	return cnt;
}

static ssize_t hyp_buffer_size_read(struct file *filp, char __user *ubuf,
				    size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	mutex_lock(&hyp_trace_lock);
	r = sprintf(buf, "%lu\n", hyp_trace_buffer_size >> 10);
	mutex_unlock(&hyp_trace_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations hyp_buffer_size_fops = {
	.write	= hyp_buffer_size,
	.read	= hyp_buffer_size_read,
};

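/* Serialize readers of a given CPU buffer (or of all of them). */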
static inline void hyp_trace_read_start(int cpu)
{
	if (cpu != RING_BUFFER_ALL_CPUS) {
		mutex_lock(&per_cpu(hyp_trace_reader_lock, cpu));
		return;
	}

	for_each_possible_cpu(cpu)
		mutex_lock(&per_cpu(hyp_trace_reader_lock, cpu));
}

static inline void hyp_trace_read_stop(int cpu)
{
	if (cpu != RING_BUFFER_ALL_CPUS) {
		mutex_unlock(&per_cpu(hyp_trace_reader_lock, cpu));
		return;
	}

	for_each_possible_cpu(cpu)
		mutex_unlock(&per_cpu(hyp_trace_reader_lock, cpu));
}

static void ht_print_trace_time(struct ht_iterator *iter)
{
	unsigned long usecs_rem;
	u64 ts_ns = iter->ts;

	do_div(ts_ns, 1000);
	usecs_rem = do_div(ts_ns, USEC_PER_SEC);

	trace_seq_printf(&iter->seq, "%5lu.%06lu: ",
			 (unsigned long)ts_ns, usecs_rem);
}

static void ht_print_trace_cpu(struct ht_iterator *iter)
{
	trace_seq_printf(&iter->seq, "[%03d]\t", iter->ent_cpu);
}

extern struct trace_event *ftrace_find_event(int type);

static int ht_print_trace_fmt(struct ht_iterator *iter)
{
	struct trace_event *e;

	if (iter->lost_events)
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->ent_cpu, iter->lost_events);

	ht_print_trace_cpu(iter);
	ht_print_trace_time(iter);

	e = ftrace_find_event(iter->ent->id);
	if (e)
		e->funcs->trace((struct trace_iterator *)iter, 0, e);
	else
		trace_seq_printf(&iter->seq, "Unknown event id %d\n", iter->ent->id);

	return trace_seq_has_overflowed(&iter->seq) ? -EOVERFLOW : 0;
}

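/*
 * Return the next event for the "trace" iterator: either the next event of a
 * single CPU, or the oldest event across all CPUs of the iterator, advancing
 * the corresponding ring-buffer iterator.
 */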
static struct ring_buffer_event *ht_next_event(struct ht_iterator *iter,
					       u64 *ts, int *cpu)
{
	struct ring_buffer_event *evt = NULL;
	int _cpu;
	u64 _ts;

	if (!iter->buf_iter)
		return NULL;

	if (iter->cpu != RING_BUFFER_ALL_CPUS) {
		evt = ring_buffer_iter_peek(iter->buf_iter[iter->cpu], ts);
		if (!evt)
			return NULL;

		*cpu = iter->cpu;
		ring_buffer_iter_advance(iter->buf_iter[*cpu]);

		return evt;
	}

	*ts = LLONG_MAX;
	for_each_cpu(_cpu, iter->cpus) {
		struct ring_buffer_event *_evt;

		_evt = ring_buffer_iter_peek(iter->buf_iter[_cpu], &_ts);
		if (!_evt)
			continue;

		if (_ts >= *ts)
			continue;

		*ts = _ts;
		*cpu = _cpu;
		evt = _evt;
	}

	if (evt)
		ring_buffer_iter_advance(iter->buf_iter[*cpu]);

	return evt;
}

static void *ht_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ht_iterator *iter = m->private;
	struct ring_buffer_event *evt;
	int cpu;
	u64 ts;

	(*pos)++;

	evt = ht_next_event(iter, &ts, &cpu);
	if (!evt)
		return NULL;

	iter->ent = (struct hyp_entry_hdr *)&evt->array[1];
	iter->ts = ts;
	iter->ent_size = evt->array[0];
	iter->ent_cpu = cpu;

	return iter;
}

static void ht_iter_reset(struct ht_iterator *iter)
{
	int cpu = iter->cpu;

	if (!iter->buf_iter)
		return;

	if (cpu != RING_BUFFER_ALL_CPUS) {
		ring_buffer_iter_reset(iter->buf_iter[cpu]);
		return;
	}

	for_each_cpu(cpu, iter->cpus)
		ring_buffer_iter_reset(iter->buf_iter[cpu]);
}

static void *ht_start(struct seq_file *m, loff_t *pos)
{
	struct ht_iterator *iter = m->private;

	/* Paired with the unconditional unlock in ht_stop() */
	hyp_trace_read_start(iter->cpu);

	if (*pos == 0) {
		ht_iter_reset(iter);
		(*pos)++;
		iter->ent = NULL;

		return iter;
	}

	return ht_next(m, NULL, pos);
}

static void ht_stop(struct seq_file *m, void *v)
{
	struct ht_iterator *iter = m->private;

	hyp_trace_read_stop(iter->cpu);
}

static void ht_total_entries(struct ht_iterator *iter, unsigned long *entries,
			     unsigned long *overrun)
{
	int cpu = iter->cpu;

	*entries = 0;
	*overrun = 0;

	if (!hyp_trace_buffer)
		return;

	if (cpu != RING_BUFFER_ALL_CPUS) {
		*entries = ring_buffer_entries_cpu(hyp_trace_buffer, cpu);
		*overrun = ring_buffer_overrun_cpu(hyp_trace_buffer, cpu);
		return;
	}

	for_each_cpu(cpu, iter->cpus) {
		*entries += ring_buffer_entries_cpu(hyp_trace_buffer, cpu);
		*overrun += ring_buffer_overrun_cpu(hyp_trace_buffer, cpu);
	}
}

static int ht_show(struct seq_file *m, void *v)
{
	struct ht_iterator *iter = v;

	if (!iter->ent) {
		unsigned long entries, overrun;

		ht_total_entries(iter, &entries, &overrun);

		seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu\n",
			   entries, overrun + entries);
	} else {
		ht_print_trace_fmt(iter);
		trace_print_seq(m, &iter->seq);
	}

	return 0;
}

static const struct seq_operations hyp_trace_ops = {
	.start	= ht_start,
	.next	= ht_next,
	.stop	= ht_stop,
	.show	= ht_show,
};

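/*
 * Opening "trace" for writing resets the buffers: a single CPU buffer is
 * simply reset, while RING_BUFFER_ALL_CPUS tears the whole tracing state down
 * (deferred if readers are still around).
 */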
static int hyp_trace_reset(int cpu)
{
	if (!hyp_trace_buffer)
		return 0;

	if (hyp_trace_on)
		return -EBUSY;

	if (cpu == RING_BUFFER_ALL_CPUS) {
		if (hyp_trace_readers)
			hyp_free_tracing_deferred = true;
		else
			hyp_free_tracing();

		return 0;
	}

	ring_buffer_reset_cpu(hyp_trace_buffer, cpu);

	return 0;
}

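/*
 * Track the number of open readers so a teardown requested while the buffers
 * are still in use can be deferred to the last hyp_dec_readers() call.
 */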
static void hyp_inc_readers(void)
{
	hyp_trace_readers++;
}

static void hyp_dec_readers(void)
{
	hyp_trace_readers--;

	WARN_ON(hyp_trace_readers < 0);

	if (hyp_trace_readers)
		return;

	if (hyp_free_tracing_deferred) {
		hyp_free_tracing();
		hyp_free_tracing_deferred = false;
	}
}

static int hyp_trace_open(struct inode *inode, struct file *file)
{
	int cpu = (s64)inode->i_private;
	int ret = 0;

	mutex_lock(&hyp_trace_lock);

	if (file->f_mode & FMODE_WRITE)
		ret = hyp_trace_reset(cpu);

	mutex_unlock(&hyp_trace_lock);

	return ret;
}

static ssize_t hyp_trace_read(struct file *filp, char __user *ubuf,
			      size_t cnt, loff_t *ppos)
{
	char buf[] = "** Reading trace not yet supported **\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t hyp_trace_write(struct file *filp, const char __user *ubuf,
			       size_t count, loff_t *ppos)
{
	/* No matter the input, writing resets the buffer */
	return count;
}

static const struct file_operations hyp_trace_fops = {
	.open		= hyp_trace_open,
	.read		= hyp_trace_read,
	.write		= hyp_trace_write,
	.release	= NULL,
};

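/*
 * Peek at the next event to consume for trace_pipe: the oldest event of the
 * iterator CPU, or the oldest one across all of its CPUs.
 */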
static struct ring_buffer_event *__ht_next_pipe_event(struct ht_iterator *iter)
{
	struct ring_buffer_event *evt = NULL;
	int cpu = iter->cpu;

	if (cpu != RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(hyp_trace_buffer, cpu))
			return NULL;

		iter->ent_cpu = cpu;

		return ring_buffer_peek(hyp_trace_buffer, cpu, &iter->ts,
					&iter->lost_events);
	}

	iter->ts = LLONG_MAX;
	for_each_cpu(cpu, iter->cpus) {
		struct ring_buffer_event *_evt;
		unsigned long lost_events;
		u64 ts;

		if (ring_buffer_empty_cpu(hyp_trace_buffer, cpu))
			continue;

		_evt = ring_buffer_peek(hyp_trace_buffer, cpu, &ts,
					&lost_events);
		if (!_evt)
			continue;

		if (ts >= iter->ts)
			continue;

		iter->ts = ts;
		iter->ent_cpu = cpu;
		iter->lost_events = lost_events;
		evt = _evt;
	}

	return evt;
}

static void *ht_next_pipe_event(struct ht_iterator *iter)
{
	struct ring_buffer_event *event;

	event = __ht_next_pipe_event(iter);
	if (!event)
		return NULL;

	iter->ent = (struct hyp_entry_hdr *)&event->array[1];
	iter->ent_size = event->array[0];

	return iter;
}

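/*
 * Blocking read: wait for data, format as many events as the seq buffer can
 * hold (consuming them along the way), then copy the text out to userspace.
 */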
static ssize_t
hyp_trace_pipe_read(struct file *file, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ht_iterator *iter = (struct ht_iterator *)file->private_data;
	int ret;

	/* seq_buf buffer size */
	if (cnt != PAGE_SIZE)
		return -EINVAL;

	trace_seq_init(&iter->seq);
again:
	ret = ring_buffer_wait(hyp_trace_buffer, iter->cpu, 0);
	if (ret < 0)
		return ret;

	hyp_trace_read_start(iter->cpu);
	while (ht_next_pipe_event(iter)) {
		int prev_len = iter->seq.seq.len;

		if (ht_print_trace_fmt(iter)) {
			iter->seq.seq.len = prev_len;
			break;
		}

		ring_buffer_consume(hyp_trace_buffer, iter->ent_cpu, NULL,
				    NULL);
	}
	hyp_trace_read_stop(iter->cpu);

	ret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (ret == -EBUSY)
		goto again;

	return ret;
}

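/*
 * Periodic work armed while tracing is on: poke the buffers so pipe readers
 * see fresh data, then re-arm itself every RB_POLL_MS.
 */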
static void __poke_reader(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ht_iterator *iter;

	iter = container_of(dwork, struct ht_iterator, poke_work);

	hyp_poke_tracing(iter->cpu, iter->cpus);

	schedule_delayed_work(dwork, msecs_to_jiffies(RB_POLL_MS));
}

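/*
 * Open a trace_pipe reader: load the buffers if needed, build an iterator for
 * the requested CPU (or for every CPU that can be poked) and register it on
 * the pipe readers list.
 */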
static int hyp_trace_pipe_open(struct inode *inode, struct file *file)
{
	int cpu = (s64)inode->i_private;
	struct ht_iterator *iter = NULL;
	int ret = 0;

	mutex_lock(&hyp_trace_lock);

	if (!hyp_trace_buffer) {
		ret = hyp_load_tracing();
		if (ret)
			goto unlock;
	}

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		goto unlock;
	}
	iter->cpu = cpu;
	file->private_data = iter;

	if (cpu == RING_BUFFER_ALL_CPUS) {
		if (!zalloc_cpumask_var(&iter->cpus, GFP_KERNEL)) {
			ret = -ENOMEM;
			goto unlock;
		}

		for_each_possible_cpu(cpu) {
			if (!ring_buffer_poke(hyp_trace_buffer, cpu))
				cpumask_set_cpu(cpu, iter->cpus);
		}
	} else {
		ret = ring_buffer_poke(hyp_trace_buffer, cpu);
		if (ret)
			goto unlock;
	}

	INIT_DELAYED_WORK(&iter->poke_work, __poke_reader);

	if (hyp_trace_on)
		schedule_delayed_work(&iter->poke_work,
				      msecs_to_jiffies(RB_POLL_MS));

	list_add(&iter->list, &hyp_pipe_readers);
	hyp_inc_readers();

unlock:
	mutex_unlock(&hyp_trace_lock);

	if (ret)
		kfree(iter);

	return ret;
}

static int hyp_trace_pipe_release(struct inode *inode, struct file *file)
{
	struct ht_iterator *iter = file->private_data;

	mutex_lock(&hyp_trace_lock);
	hyp_dec_readers();
	list_del(&iter->list);
	mutex_unlock(&hyp_trace_lock);

	cancel_delayed_work_sync(&iter->poke_work);

	free_cpumask_var(iter->cpus);

	kfree(iter);

	return 0;
}

static const struct file_operations hyp_trace_pipe_fops = {
	.open		= hyp_trace_pipe_open,
	.read		= hyp_trace_pipe_read,
	.release	= hyp_trace_pipe_release,
	.llseek		= no_llseek,
};

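/*
 * trace_pipe_raw: return whole ring-buffer pages as binary data, remembering
 * how much of the spare page userspace has not consumed yet.
 */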
static ssize_t
hyp_trace_raw_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct ht_iterator *iter = (struct ht_iterator *)file->private_data;
	size_t size;
	int ret;

	if (iter->copy_leftover)
		goto read;

again:
	hyp_trace_read_start(iter->cpu);
	ret = ring_buffer_read_page(hyp_trace_buffer, &iter->spare,
				    cnt, iter->cpu, 0);
	hyp_trace_read_stop(iter->cpu);
	if (ret < 0) {
		if (!ring_buffer_empty_cpu(hyp_trace_buffer, iter->cpu))
			return 0;

		ret = ring_buffer_wait(hyp_trace_buffer, iter->cpu, 0);
		if (ret < 0)
			return ret;

		goto again;
	}

	iter->copy_leftover = 0;

read:
	size = PAGE_SIZE - iter->copy_leftover;
	if (size > cnt)
		size = cnt;

	ret = copy_to_user(ubuf, iter->spare + PAGE_SIZE - size, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;
	*ppos += size;
	iter->copy_leftover = ret;

	return size;
}

static int hyp_trace_raw_open(struct inode *inode, struct file *file)
{
	int ret = hyp_trace_pipe_open(inode, file);
	struct ht_iterator *iter;

	if (ret)
		return ret;

	iter = file->private_data;
	iter->spare = ring_buffer_alloc_read_page(hyp_trace_buffer, iter->cpu);
	if (IS_ERR(iter->spare)) {
		ret = PTR_ERR(iter->spare);
		iter->spare = NULL;
		return ret;
	}

	return 0;
}

static int hyp_trace_raw_release(struct inode *inode, struct file *file)
{
	struct ht_iterator *iter = file->private_data;

	ring_buffer_free_read_page(hyp_trace_buffer, iter->cpu, iter->spare);

	return hyp_trace_pipe_release(inode, file);
}

static const struct file_operations hyp_trace_raw_fops = {
	.open		= hyp_trace_raw_open,
	.read		= hyp_trace_raw_read,
	.release	= hyp_trace_raw_release,
	.llseek		= no_llseek,
};

static int hyp_trace_clock_show(struct seq_file *m, void *v)
{
	seq_puts(m, "[boot]\n");

	return 0;
}

static int hyp_trace_clock_open(struct inode *inode, struct file *file)
{
	return single_open(file, hyp_trace_clock_show, NULL);
}

static const struct file_operations hyp_trace_clock_fops = {
	.open		= hyp_trace_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void hyp_tracefs_create_cpu_file(const char *file_name,
					int cpu,
					umode_t mode,
					const struct file_operations *fops,
					struct dentry *parent)
{
	if (!tracefs_create_file(file_name, mode, parent, (void *)(s64)cpu, fops))
		pr_warn("Failed to create tracefs %pd/%s\n", parent, file_name);
}

void kvm_hyp_init_events_tracefs(struct dentry *parent);
bool kvm_hyp_events_enable_early(void);

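/*
 * Create the tracefs "hyp/" hierarchy, mirroring the usual ftrace layout:
 * tracing_on, buffer_size_kb, trace_clock, trace, trace_pipe and a
 * per_cpu/cpuN/ directory for each possible CPU.
 */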
int init_hyp_tracefs(void)
{
	struct dentry *d, *root_dir, *per_cpu_root_dir;
	char per_cpu_name[16];
	int err, cpu;

	if (!is_protected_kvm_enabled())
		return 0;

	/* Per-CPU mutexes cannot be statically initialized */
	for_each_possible_cpu(cpu)
		mutex_init(per_cpu_ptr(&hyp_trace_reader_lock, cpu));

	root_dir = tracefs_create_dir(TRACEFS_DIR, NULL);
	if (!root_dir) {
		pr_err("Failed to create tracefs "TRACEFS_DIR"/\n");
		return -ENODEV;
	}

	d = tracefs_create_file("tracing_on", TRACEFS_MODE_WRITE, root_dir,
				NULL, &hyp_tracing_on_fops);
	if (!d) {
		pr_err("Failed to create tracefs "TRACEFS_DIR"/tracing_on\n");
		return -ENODEV;
	}

	d = tracefs_create_file("buffer_size_kb", TRACEFS_MODE_WRITE, root_dir,
				NULL, &hyp_buffer_size_fops);
	if (!d)
		pr_err("Failed to create tracefs "TRACEFS_DIR"/buffer_size_kb\n");

	d = tracefs_create_file("trace_clock", TRACEFS_MODE_READ, root_dir, NULL,
				&hyp_trace_clock_fops);
	if (!d)
		pr_err("Failed to create tracefs "TRACEFS_DIR"/trace_clock\n");

	hyp_tracefs_create_cpu_file("trace", RING_BUFFER_ALL_CPUS,
				    TRACEFS_MODE_WRITE, &hyp_trace_fops,
				    root_dir);
	hyp_tracefs_create_cpu_file("trace_pipe", RING_BUFFER_ALL_CPUS,
				    TRACEFS_MODE_READ, &hyp_trace_pipe_fops,
				    root_dir);

	per_cpu_root_dir = tracefs_create_dir("per_cpu", root_dir);
	if (!per_cpu_root_dir) {
		pr_err("Failed to create tracefs "TRACEFS_DIR"/per_cpu/\n");
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		struct dentry *dir;

		snprintf(per_cpu_name, sizeof(per_cpu_name), "cpu%d", cpu);
		dir = tracefs_create_dir(per_cpu_name, per_cpu_root_dir);
		if (!dir) {
			pr_warn("Failed to create tracefs "TRACEFS_DIR"/per_cpu/cpu%d\n",
				cpu);
			continue;
		}

		hyp_tracefs_create_cpu_file("trace", cpu, TRACEFS_MODE_WRITE,
					    &hyp_trace_fops, dir);
		hyp_tracefs_create_cpu_file("trace_pipe", cpu, TRACEFS_MODE_READ,
					    &hyp_trace_pipe_fops, dir);
		hyp_tracefs_create_cpu_file("trace_pipe_raw", cpu,
					    TRACEFS_MODE_READ,
					    &hyp_trace_raw_fops, dir);
	}

	kvm_hyp_init_events_tracefs(root_dir);
	if (kvm_hyp_events_enable_early()) {
		err = hyp_start_tracing();
		if (err)
			pr_warn("Failed to start early events tracing: %d\n", err);
	}

	return 0;
}