  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <nvhe/clock.h>
  3. #include <asm/arch_timer.h>
  4. #include <asm/div64.h>
/*
 * Snapshot of the host-provided clock parameters (mult/shift slope and
 * epoch) consumed by trace_clock() and refreshed by trace_clock_update().
 * Accessed locklessly — see the comment on trace_clock_update().
 */
static struct kvm_nvhe_clock_data trace_clock_data;
/*
 * Update without any locks! This is fine because tracing, the sole user of
 * this clock, orders its memory accesses and protects against races between
 * reads and updates.
 *
 * NOTE(review): the reader (trace_clock()) may observe a partially applied
 * update (e.g. a new mult with an old epoch); the tracing layer is assumed
 * to tolerate that, per the comment above — the fields are deliberately
 * stored one by one rather than via a struct copy.
 */
void trace_clock_update(struct kvm_nvhe_clock_data *data)
{
	trace_clock_data.mult = data->mult;
	trace_clock_data.shift = data->shift;
	trace_clock_data.epoch_ns = data->epoch_ns;
	trace_clock_data.epoch_cyc = data->epoch_cyc;
}
  18. /*
  19. * This clock is relying on host provided slope and epoch values to return
  20. * something synchronized with the host. The downside is we can't trust the
  21. * output which must not be used for anything else than debugging.
  22. */
  23. u64 trace_clock(void)
  24. {
  25. u64 cyc = __arch_counter_get_cntpct() - trace_clock_data.epoch_cyc;
  26. __uint128_t ns;
  27. /*
  28. * The host kernel can avoid the 64-bits overflow of the multiplication
  29. * by updating the epoch value with a timer (see
  30. * kernel/time/clocksource.c). The hypervisor doesn't have that option,
  31. * so let's do a more costly 128-bits mult here.
  32. */
  33. ns = (__uint128_t)cyc * trace_clock_data.mult;
  34. ns >>= trace_clock_data.shift;
  35. return (u64)ns + trace_clock_data.epoch_ns;
  36. }