hyperv_timer.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Clocksource driver for the synthetic counter and timers
 * provided by the Hyper-V hypervisor to guest VMs, as described
 * in the Hyper-V Top Level Functional Spec (TLFS). This driver
 * is instruction set architecture independent.
 *
 * Copyright (C) 2019, Microsoft, Inc.
 *
 * Author: Michael Kelley <[email protected]>
 */

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <linux/mm.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/hyperv.h>
#include <clocksource/hyperv_timer.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>

static struct clock_event_device __percpu *hv_clock_event;
static u64 hv_sched_clock_offset __ro_after_init;

/*
 * If false, we're using the old mechanism for stimer0 interrupts
 * where it sends a VMbus message when it expires. The old
 * mechanism is used when running on older versions of Hyper-V
 * that don't support Direct Mode. While Hyper-V provides
 * four stimers per CPU, Linux uses only stimer0.
 *
 * Because Direct Mode does not require processing a VMbus
 * message, stimer interrupts can be enabled earlier in the
 * process of booting a CPU, consistent with when timer
 * interrupts are enabled for other clocksource drivers.
 * However, for legacy versions of Hyper-V when Direct Mode
 * is not enabled, setting up stimer interrupts must be
 * delayed until VMbus is initialized and can process the
 * interrupt message.
 */
static bool direct_mode_enabled;

static int stimer0_irq = -1;
static int stimer0_message_sint;
static DEFINE_PER_CPU(long, stimer0_evt);

/*
 * Common code for stimer0 interrupts coming via Direct Mode or
 * as a VMbus message.
 */
void hv_stimer0_isr(void)
{
        struct clock_event_device *ce;

        ce = this_cpu_ptr(hv_clock_event);
        ce->event_handler(ce);
}
EXPORT_SYMBOL_GPL(hv_stimer0_isr);

/*
 * stimer0 interrupt handler for architectures that support
 * per-cpu interrupts, which also implies Direct Mode.
 */
static irqreturn_t hv_stimer0_percpu_isr(int irq, void *dev_id)
{
        hv_stimer0_isr();
        return IRQ_HANDLED;
}
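
/*
 * Program stimer0 to expire "delta" ticks from now. Both the
 * reference counter and the stimer0 count register are in 100 ns
 * units (HV_CLOCK_HZ is 10 MHz).
 */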
static int hv_ce_set_next_event(unsigned long delta,
                                struct clock_event_device *evt)
{
        u64 current_tick;

        current_tick = hv_read_reference_counter();
        current_tick += delta;
        hv_set_register(HV_REGISTER_STIMER0_COUNT, current_tick);
        return 0;
}
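
/*
 * Stop stimer0 by clearing its count and config registers, and
 * disable the per-cpu IRQ if Direct Mode is using one.
 */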
static int hv_ce_shutdown(struct clock_event_device *evt)
{
        hv_set_register(HV_REGISTER_STIMER0_COUNT, 0);
        hv_set_register(HV_REGISTER_STIMER0_CONFIG, 0);
        if (direct_mode_enabled && stimer0_irq >= 0)
                disable_percpu_irq(stimer0_irq);

        return 0;
}
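
/*
 * Put stimer0 in one-shot mode. With auto_enable set, the timer
 * arms whenever a new expiration time is written to the count
 * register by hv_ce_set_next_event().
 */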
static int hv_ce_set_oneshot(struct clock_event_device *evt)
{
        union hv_stimer_config timer_cfg;

        timer_cfg.as_uint64 = 0;
        timer_cfg.enable = 1;
        timer_cfg.auto_enable = 1;
        if (direct_mode_enabled) {
                /*
                 * When it expires, the timer will directly interrupt
                 * on the specified hardware vector/IRQ.
                 */
                timer_cfg.direct_mode = 1;
                timer_cfg.apic_vector = HYPERV_STIMER0_VECTOR;
                if (stimer0_irq >= 0)
                        enable_percpu_irq(stimer0_irq, IRQ_TYPE_NONE);
        } else {
                /*
                 * When it expires, the timer will generate a VMbus message,
                 * to be handled by the normal VMbus interrupt handler.
                 */
                timer_cfg.direct_mode = 0;
                timer_cfg.sintx = stimer0_message_sint;
        }
        hv_set_register(HV_REGISTER_STIMER0_CONFIG, timer_cfg.as_uint64);
        return 0;
}

/*
 * hv_stimer_init - Per-cpu initialization of the clockevent
 */
static int hv_stimer_init(unsigned int cpu)
{
        struct clock_event_device *ce;

        if (!hv_clock_event)
                return 0;

        ce = per_cpu_ptr(hv_clock_event, cpu);
        ce->name = "Hyper-V clockevent";
        ce->features = CLOCK_EVT_FEAT_ONESHOT;
        ce->cpumask = cpumask_of(cpu);
        ce->rating = 1000;
        ce->set_state_shutdown = hv_ce_shutdown;
        ce->set_state_oneshot = hv_ce_set_oneshot;
        ce->set_next_event = hv_ce_set_next_event;

        clockevents_config_and_register(ce,
                                        HV_CLOCK_HZ,
                                        HV_MIN_DELTA_TICKS,
                                        HV_MAX_MAX_DELTA_TICKS);
        return 0;
}

/*
 * hv_stimer_cleanup - Per-cpu cleanup of the clockevent
 */
int hv_stimer_cleanup(unsigned int cpu)
{
        struct clock_event_device *ce;

        if (!hv_clock_event)
                return 0;

        /*
         * In the legacy case where Direct Mode is not enabled
         * (which can only be on x86/x64), stimer cleanup happens
         * relatively early in the CPU offlining process. We
         * must unbind the stimer-based clockevent device so
         * that the LAPIC timer can take over until clockevents
         * are no longer needed in the offlining process. Note
         * that clockevents_unbind_device() eventually calls
         * hv_ce_shutdown().
         *
         * The unbind should not be done when Direct Mode is
         * enabled because we may be on an architecture where
         * there are no other clockevent devices to fall back to.
         */
        ce = per_cpu_ptr(hv_clock_event, cpu);
        if (direct_mode_enabled)
                hv_ce_shutdown(ce);
        else
                clockevents_unbind_device(ce, cpu);

        return 0;
}
EXPORT_SYMBOL_GPL(hv_stimer_cleanup);

/*
 * These placeholders are overridden by arch specific code on
 * architectures that need special setup of the stimer0 IRQ because
 * they don't support per-cpu IRQs (such as x86/x64).
 */
void __weak hv_setup_stimer0_handler(void (*handler)(void))
{
}

void __weak hv_remove_stimer0_handler(void)
{
}

/* Called only on architectures with per-cpu IRQs (i.e., not x86/x64) */
static int hv_setup_stimer0_irq(void)
{
        int ret;

        ret = acpi_register_gsi(NULL, HYPERV_STIMER0_VECTOR,
                                ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH);
        if (ret < 0) {
                pr_err("Can't register Hyper-V stimer0 GSI. Error %d", ret);
                return ret;
        }
        stimer0_irq = ret;

        ret = request_percpu_irq(stimer0_irq, hv_stimer0_percpu_isr,
                                 "Hyper-V stimer0", &stimer0_evt);
        if (ret) {
                pr_err("Can't request Hyper-V stimer0 IRQ %d. Error %d",
                       stimer0_irq, ret);
                acpi_unregister_gsi(stimer0_irq);
                stimer0_irq = -1;
        }
        return ret;
}

static void hv_remove_stimer0_irq(void)
{
        if (stimer0_irq == -1) {
                hv_remove_stimer0_handler();
        } else {
                free_percpu_irq(stimer0_irq, &stimer0_evt);
                acpi_unregister_gsi(stimer0_irq);
                stimer0_irq = -1;
        }
}

/* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
int hv_stimer_alloc(bool have_percpu_irqs)
{
        int ret;

        /*
         * Synthetic timers are always available except on old versions of
         * Hyper-V on x86. In that case, return an error as Linux will use a
         * clockevent based on emulated LAPIC timer hardware.
         */
        if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
                return -EINVAL;

        hv_clock_event = alloc_percpu(struct clock_event_device);
        if (!hv_clock_event)
                return -ENOMEM;

        direct_mode_enabled = ms_hyperv.misc_features &
                        HV_STIMER_DIRECT_MODE_AVAILABLE;

        /*
         * If Direct Mode isn't enabled, the remainder of the initialization
         * is done later by hv_stimer_legacy_init().
         */
        if (!direct_mode_enabled)
                return 0;

        if (have_percpu_irqs) {
                ret = hv_setup_stimer0_irq();
                if (ret)
                        goto free_clock_event;
        } else {
                hv_setup_stimer0_handler(hv_stimer0_isr);
        }

        /*
         * Since we are in Direct Mode, stimer initialization
         * can be done now with a CPUHP value in the same range
         * as other clockevent devices.
         */
        ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
                                "clockevents/hyperv/stimer:starting",
                                hv_stimer_init, hv_stimer_cleanup);
        if (ret < 0) {
                hv_remove_stimer0_irq();
                goto free_clock_event;
        }
        return ret;

free_clock_event:
        free_percpu(hv_clock_event);
        hv_clock_event = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(hv_stimer_alloc);

/*
 * hv_stimer_legacy_init -- Called from the VMbus driver to handle
 * the case when Direct Mode is not enabled, and the stimer
 * must be initialized late in the CPU onlining process.
 */
void hv_stimer_legacy_init(unsigned int cpu, int sint)
{
        if (direct_mode_enabled)
                return;

        /*
         * This function gets called by each vCPU, so setting the
         * global stimer0_message_sint value each time is conceptually
         * not ideal, but the value passed in is always the same and
         * it avoids introducing yet another interface into this
         * clocksource driver just to set the sint in the legacy case.
         */
        stimer0_message_sint = sint;
        (void)hv_stimer_init(cpu);
}
EXPORT_SYMBOL_GPL(hv_stimer_legacy_init);

/*
 * hv_stimer_legacy_cleanup -- Called from the VMbus driver to
 * handle the case when Direct Mode is not enabled, and the
 * stimer must be cleaned up early in the CPU offlining
 * process.
 */
void hv_stimer_legacy_cleanup(unsigned int cpu)
{
        if (direct_mode_enabled)
                return;
        (void)hv_stimer_cleanup(cpu);
}
EXPORT_SYMBOL_GPL(hv_stimer_legacy_cleanup);

/*
 * Do a global cleanup of clockevents for the cases of kexec and
 * vmbus exit
 */
void hv_stimer_global_cleanup(void)
{
        int cpu;

        /*
         * hv_stimer_legacy_cleanup() will stop the stimer if Direct
         * Mode is not enabled, and fall back to the LAPIC timer.
         */
        for_each_present_cpu(cpu) {
                hv_stimer_legacy_cleanup(cpu);
        }

        if (!hv_clock_event)
                return;

        if (direct_mode_enabled) {
                cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);
                hv_remove_stimer0_irq();
                stimer0_irq = -1;
        }
        free_percpu(hv_clock_event);
        hv_clock_event = NULL;
}
EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
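
/*
 * Sketch of how a client such as the VMbus driver is expected to
 * use this API; the sequence shown is illustrative, not normative:
 *
 *      hv_stimer_alloc(false);                 // global init at boot
 *      hv_stimer_legacy_init(cpu, sint);       // per CPU, legacy mode only
 *      ...
 *      hv_stimer_legacy_cleanup(cpu);          // per CPU, on offlining
 *      hv_stimer_global_cleanup();             // on kexec or VMbus exit
 */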

/*
 * Code and definitions for the Hyper-V clocksources. Two
 * clocksources are defined: one that reads the Hyper-V defined MSR, and
 * the other that uses the TSC reference page feature as defined in the
 * TLFS. The MSR version is for compatibility with old versions of
 * Hyper-V and 32-bit x86. The TSC reference page version is preferred.
 */

u64 (*hv_read_reference_counter)(void);
EXPORT_SYMBOL_GPL(hv_read_reference_counter);
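
/*
 * The TSC page is padded to a full page and page-aligned because
 * Hyper-V maps it into the guest by page frame number (see
 * resume_hv_clock_tsc() and hv_init_tsc_clocksource() below).
 */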
static union {
        struct ms_hyperv_tsc_page page;
        u8 reserved[PAGE_SIZE];
} tsc_pg __aligned(PAGE_SIZE);

struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
        return &tsc_pg.page;
}
EXPORT_SYMBOL_GPL(hv_get_tsc_page);

static u64 notrace read_hv_clock_tsc(void)
{
        u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());

        /* Fall back to the TIME_REF_COUNT MSR if the TSC page is invalid */
        if (current_tick == U64_MAX)
                current_tick = hv_get_register(HV_REGISTER_TIME_REF_COUNT);

        return current_tick;
}

static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
{
        return read_hv_clock_tsc();
}
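
/*
 * sched_clock must return nanoseconds. The reference time is in
 * 100 ns units (HV_CLOCK_HZ is 10 MHz), so scale by
 * NSEC_PER_SEC / HV_CLOCK_HZ == 100.
 */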
static u64 notrace read_hv_sched_clock_tsc(void)
{
        return (read_hv_clock_tsc() - hv_sched_clock_offset) *
                (NSEC_PER_SEC / HV_CLOCK_HZ);
}
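
/*
 * Suspend/resume hooks for hibernation: clear the enable bit so
 * Hyper-V stops updating the TSC page while the VM is down, then
 * re-enable it and re-register the page's PFN on resume.
 */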
static void suspend_hv_clock_tsc(struct clocksource *arg)
{
        union hv_reference_tsc_msr tsc_msr;

        /* Disable the TSC page */
        tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
        tsc_msr.enable = 0;
        hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
}

static void resume_hv_clock_tsc(struct clocksource *arg)
{
        phys_addr_t phys_addr = virt_to_phys(&tsc_pg);
        union hv_reference_tsc_msr tsc_msr;

        /* Re-enable the TSC page */
        tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
        tsc_msr.enable = 1;
        tsc_msr.pfn = HVPFN_DOWN(phys_addr);
        hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
}

#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
static int hv_cs_enable(struct clocksource *cs)
{
        vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
        return 0;
}
#endif

static struct clocksource hyperv_cs_tsc = {
        .name                   = "hyperv_clocksource_tsc_page",
        .rating                 = 500,
        .read                   = read_hv_clock_tsc_cs,
        .mask                   = CLOCKSOURCE_MASK(64),
        .flags                  = CLOCK_SOURCE_IS_CONTINUOUS,
        .suspend                = suspend_hv_clock_tsc,
        .resume                 = resume_hv_clock_tsc,
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
        .enable                 = hv_cs_enable,
        .vdso_clock_mode        = VDSO_CLOCKMODE_HVCLOCK,
#else
        .vdso_clock_mode        = VDSO_CLOCKMODE_NONE,
#endif
};

static u64 notrace read_hv_clock_msr(void)
{
        /*
         * Read the partition counter to get the current tick count. This count
         * is set to 0 when the partition is created and is incremented in
         * 100 nanosecond units.
         */
        return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
}

static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
{
        return read_hv_clock_msr();
}

static u64 notrace read_hv_sched_clock_msr(void)
{
        return (read_hv_clock_msr() - hv_sched_clock_offset) *
                (NSEC_PER_SEC / HV_CLOCK_HZ);
}

static struct clocksource hyperv_cs_msr = {
        .name           = "hyperv_clocksource_msr",
        .rating         = 500,
        .read           = read_hv_clock_msr_cs,
        .mask           = CLOCKSOURCE_MASK(64),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};
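
/*
 * Note that hyperv_cs_msr has no suspend/resume or vDSO hooks,
 * presumably because the reference count MSR needs no guest-side
 * setup and so has no state to save or restore.
 */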

/*
 * Reference to pv_ops must be inline so objtool
 * detection of noinstr violations can work correctly.
 */
#ifdef CONFIG_GENERIC_SCHED_CLOCK
static __always_inline void hv_setup_sched_clock(void *sched_clock)
{
        /*
         * We're on an architecture with generic sched clock (not x86/x64).
         * The Hyper-V sched clock read function returns nanoseconds, not
         * the normal 100ns units of the Hyper-V synthetic clock.
         */
        sched_clock_register(sched_clock, 64, NSEC_PER_SEC);
}
#elif defined CONFIG_PARAVIRT
static __always_inline void hv_setup_sched_clock(void *sched_clock)
{
        /* We're on x86/x64 *and* using PV ops */
        paravirt_set_sched_clock(sched_clock);
}
#else /* !CONFIG_GENERIC_SCHED_CLOCK && !CONFIG_PARAVIRT */
static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
#endif /* CONFIG_GENERIC_SCHED_CLOCK */

static bool __init hv_init_tsc_clocksource(void)
{
        union hv_reference_tsc_msr tsc_msr;
        phys_addr_t phys_addr;

        if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
                return false;

        if (hv_root_partition)
                return false;

        /*
         * If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
         * handles frequency and offset changes due to live migration,
         * pause/resume, and other VM management operations. So lower the
         * Hyper-V Reference TSC rating, causing the generic TSC to be used.
         * TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
         * TSC will be preferred over the virtualized ARM64 arch counter.
         * While the Hyper-V MSR clocksource won't be used since the
         * Reference TSC clocksource is present, change its rating as
         * well for consistency.
         */
        if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
                hyperv_cs_tsc.rating = 250;
                hyperv_cs_msr.rating = 250;
        }

        hv_read_reference_counter = read_hv_clock_tsc;
        phys_addr = virt_to_phys(hv_get_tsc_page());

        /*
         * The Hyper-V TLFS specifies to preserve the value of reserved
         * bits in registers. So read the existing value, preserve the
         * low order 12 bits, and add in the guest physical address
         * (which already has at least the low 12 bits set to zero since
         * it is page aligned). Also set the "enable" bit, which is bit 0.
         */
        tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
        tsc_msr.enable = 1;
        tsc_msr.pfn = HVPFN_DOWN(phys_addr);
        hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);

        clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);

        hv_sched_clock_offset = hv_read_reference_counter();
        hv_setup_sched_clock(read_hv_sched_clock_tsc);

        return true;
}

void __init hv_init_clocksource(void)
{
        /*
         * Try to set up the TSC page clocksource. If it succeeds, we're
         * done. Otherwise, set up the MSR clocksource. At least one of
         * these will always be available except on very old versions of
         * Hyper-V on x86. In that case we won't have a Hyper-V
         * clocksource, but Linux will still run with a clocksource based
         * on the emulated PIT or LAPIC timer.
         */
        if (hv_init_tsc_clocksource())
                return;

        if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE))
                return;

        hv_read_reference_counter = read_hv_clock_msr;
        clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);

        hv_sched_clock_offset = hv_read_reference_counter();
        hv_setup_sched_clock(read_hv_sched_clock_msr);
}