vcpu_timer.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *        Atish Patra <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <clocksource/timer-riscv.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/kvm_vcpu_timer.h>

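/* Current guest time: the host cycle counter plus the per-VM offset. */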
static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
{
        return get_cycles64() + gt->time_delta;
}

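/*
 * Convert an absolute guest cycle target into a relative delay in
 * nanoseconds using the clocksource mult/shift pair cached at VM init.
 * Returns 0 if the target time has already passed.
 */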
static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
                                     struct kvm_guest_timer *gt,
                                     struct kvm_vcpu_timer *t)
{
        unsigned long flags;
        u64 cycles_now, cycles_delta, delta_ns;

        local_irq_save(flags);
        cycles_now = kvm_riscv_current_cycles(gt);
        if (cycles_now < cycles)
                cycles_delta = cycles - cycles_now;
        else
                cycles_delta = 0;
        delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
        local_irq_restore(flags);

        return delta_ns;
}

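/*
 * hrtimer callback for the emulated (non-Sstc) timer: re-arm if the
 * programmed deadline has not been reached yet, otherwise inject the
 * VS-level timer interrupt into the guest.
 */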
static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
{
        u64 delta_ns;
        struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
        struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

        if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
                delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
                hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
                return HRTIMER_RESTART;
        }

        t->next_set = false;
        kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);

        return HRTIMER_NORESTART;
}

static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
{
        if (!t->init_done || !t->next_set)
                return -EINVAL;

        hrtimer_cancel(&t->hrt);
        t->next_set = false;

        return 0;
}

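/*
 * Sstc backend: program the deadline directly into the vstimecmp CSR so
 * hardware raises the guest timer interrupt without exiting to the host.
 * On 32-bit hosts the 64-bit compare value spans VSTIMECMP/VSTIMECMPH.
 */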
static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
{
#if defined(CONFIG_32BIT)
        csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
        csr_write(CSR_VSTIMECMPH, ncycles >> 32);
#else
        csr_write(CSR_VSTIMECMP, ncycles);
#endif
        return 0;
}

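/*
 * Emulated backend: clear any stale VS timer interrupt and arm a host
 * hrtimer that will inject the interrupt when the deadline expires.
 */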
static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
        u64 delta_ns;

        if (!t->init_done)
                return -EINVAL;

        kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);

        delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
        t->next_cycles = ncycles;
        hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
        t->next_set = true;

        return 0;
}

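/* Program the next timer event via the backend selected at init time. */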
int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;

        return t->timer_next_event(vcpu, ncycles);
}

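/*
 * hrtimer callback used with Sstc: the interrupt itself is delivered by
 * hardware through vstimecmp, so this only kicks a blocked vCPU awake.
 */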
static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
{
        u64 delta_ns;
        struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
        struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

        if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
                delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
                hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
                return HRTIMER_RESTART;
        }

        t->next_set = false;
        kvm_vcpu_kick(vcpu);

        return HRTIMER_NORESTART;
}

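/* True if the deadline has passed or a VS timer interrupt is already pending. */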
bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

        if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
            kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
                return true;
        else
                return false;
}

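/*
 * While a vCPU is blocked in the host, use a host hrtimer as the wakeup
 * source for its pending timer deadline; cancel it again on unblock.
 */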
static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
        u64 delta_ns;

        if (!t->init_done)
                return;

        delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
        hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
        t->next_set = true;
}

static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
{
        kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

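/*
 * ONE_REG accessors for the timer register group (frequency, time,
 * compare, state). The register id is expected to combine the RISC-V
 * architecture, size, and KVM_REG_RISCV_TIMER components with the field
 * index derived from struct kvm_riscv_timer.
 */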
int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
                                 const struct kvm_one_reg *reg)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
        u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_TIMER);
        u64 reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(u64))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
                return -EINVAL;

        switch (reg_num) {
        case KVM_REG_RISCV_TIMER_REG(frequency):
                reg_val = riscv_timebase;
                break;
        case KVM_REG_RISCV_TIMER_REG(time):
                reg_val = kvm_riscv_current_cycles(gt);
                break;
        case KVM_REG_RISCV_TIMER_REG(compare):
                reg_val = t->next_cycles;
                break;
        case KVM_REG_RISCV_TIMER_REG(state):
                reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
                                          KVM_RISCV_TIMER_STATE_OFF;
                break;
        default:
                return -EINVAL;
        }

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

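/*
 * Write one timer register: the timebase frequency is read-only, writing
 * "time" adjusts the per-VM time delta, and "state" arms or cancels the
 * timer.
 */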
int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
                                 const struct kvm_one_reg *reg)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
        u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_TIMER);
        u64 reg_val;
        int ret = 0;

        if (KVM_REG_SIZE(reg->id) != sizeof(u64))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_num) {
        case KVM_REG_RISCV_TIMER_REG(frequency):
                ret = -EOPNOTSUPP;
                break;
        case KVM_REG_RISCV_TIMER_REG(time):
                gt->time_delta = reg_val - get_cycles64();
                break;
        case KVM_REG_RISCV_TIMER_REG(compare):
                t->next_cycles = reg_val;
                break;
        case KVM_REG_RISCV_TIMER_REG(state):
                if (reg_val == KVM_RISCV_TIMER_STATE_ON)
                        ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
                else
                        ret = kvm_riscv_vcpu_timer_cancel(t);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;

        if (t->init_done)
                return -EINVAL;

        hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        t->init_done = true;
        t->next_set = false;

        /* Enable sstc for every vcpu if available in hardware */
        if (riscv_isa_extension_available(NULL, SSTC)) {
                t->sstc_enabled = true;
                t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
                t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
        } else {
                t->sstc_enabled = false;
                t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
                t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
        }

        return 0;
}

int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
{
        int ret;

        ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
        vcpu->arch.timer.init_done = false;

        return ret;
}

int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;

        t->next_cycles = -1ULL;
        return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

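/* Program htimedelta so guest time reads include the per-VM offset. */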
static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
{
        struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

#if defined(CONFIG_32BIT)
        csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
        csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#else
        csr_write(CSR_HTIMEDELTA, gt->time_delta);
#endif
}

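/*
 * Called when the vCPU is loaded on a host CPU: restore htimedelta and,
 * with Sstc, the saved vstimecmp value, then stop the wakeup hrtimer.
 */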
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;

        kvm_riscv_vcpu_update_timedelta(vcpu);

        if (!t->sstc_enabled)
                return;

#if defined(CONFIG_32BIT)
        csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
        csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
#else
        csr_write(CSR_VSTIMECMP, t->next_cycles);
#endif

        /* timer should be enabled for the remaining operations */
        if (unlikely(!t->init_done))
                return;

        kvm_riscv_vcpu_timer_unblocking(vcpu);
}

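/*
 * Capture the guest's latest vstimecmp value after running it, so that
 * next_cycles reflects writes the guest made directly to the CSR.
 */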
void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;

        if (!t->sstc_enabled)
                return;

#if defined(CONFIG_32BIT)
        t->next_cycles = csr_read(CSR_VSTIMECMP);
        t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
#else
        t->next_cycles = csr_read(CSR_VSTIMECMP);
#endif
}

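/*
 * Called when the vCPU is put: if it is blocking, arm the host hrtimer
 * so the pending deadline can still wake it up.
 */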
void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_timer *t = &vcpu->arch.timer;

        if (!t->sstc_enabled)
                return;

        /*
         * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
         * upon every VM exit so no need to save here.
         */

        /* timer should be enabled for the remaining operations */
        if (unlikely(!t->init_done))
                return;

        if (kvm_vcpu_is_blocking(vcpu))
                kvm_riscv_vcpu_timer_blocking(vcpu);
}

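/*
 * Per-VM init: cache the clocksource mult/shift pair and start guest
 * time at zero by offsetting the current host cycle count.
 */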
void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
        struct kvm_guest_timer *gt = &kvm->arch.timer;

        riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
        gt->time_delta = -get_cycles64();
}