pvtime.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <asm/kvm_mmu.h>
#include <asm/pvclock-abi.h>

#include <kvm/arm_hypercalls.h>

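/*
 * Fold the time this vCPU spent scheduled out into the guest's stolen
 * time record: read the little-endian stolen_time field from the shared
 * structure, add the run_delay accrued since the last update, and write
 * the sum back, all under the kvm->srcu read lock.
 */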
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        u64 base = vcpu->arch.steal.base;
        u64 last_steal = vcpu->arch.steal.last_steal;
        u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
        u64 steal = 0;
        int idx;

        if (base == GPA_INVALID)
                return;

        idx = srcu_read_lock(&kvm->srcu);
        if (!kvm_get_guest(kvm, base + offset, steal)) {
                steal = le64_to_cpu(steal);
                vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
                steal += vcpu->arch.steal.last_steal - last_steal;
                kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
        }
        srcu_read_unlock(&kvm->srcu, idx);
}

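/*
 * ARM_SMCCC_HV_PV_TIME_FEATURES handler: advertise the FEATURES and ST
 * calls as supported once userspace has configured a stolen-time base
 * address for this vCPU.
 */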
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
{
        u32 feature = smccc_get_arg1(vcpu);
        long val = SMCCC_RET_NOT_SUPPORTED;

        switch (feature) {
        case ARM_SMCCC_HV_PV_TIME_FEATURES:
        case ARM_SMCCC_HV_PV_TIME_ST:
                if (vcpu->arch.steal.base != GPA_INVALID)
                        val = SMCCC_RET_SUCCESS;
                break;
        }

        return val;
}

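/*
 * ARM_SMCCC_HV_PV_TIME_ST handler: zero the shared stolen-time structure,
 * snapshot run_delay so counting starts now, and return the structure's
 * IPA (or GPA_INVALID when the feature is not configured).
 */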
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
{
        struct pvclock_vcpu_stolen_time init_values = {};
        struct kvm *kvm = vcpu->kvm;
        u64 base = vcpu->arch.steal.base;

        if (base == GPA_INVALID)
                return base;

        /*
         * Start counting stolen time from the time the guest requests
         * the feature to be enabled.
         */
        vcpu->arch.steal.last_steal = current->sched_info.run_delay;

        kvm_write_guest_lock(kvm, base, &init_values, sizeof(init_values));

        return base;
}

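/* Stolen time relies on the scheduler's run_delay accounting. */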
bool kvm_arm_pvtime_supported(void)
{
        return !!sched_info_on();
}

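/*
 * KVM_SET_DEVICE_ATTR handler: validate the 64-byte-aligned IPA supplied
 * by userspace, check it lies in a memslot, and record it as the
 * stolen-time base for this vCPU.
 */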
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr)
{
        u64 __user *user = (u64 __user *)attr->addr;
        struct kvm *kvm = vcpu->kvm;
        u64 ipa;
        int ret = 0;
        int idx;

        if (!kvm_arm_pvtime_supported() ||
            attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
                return -ENXIO;

        if (get_user(ipa, user))
                return -EFAULT;
        if (!IS_ALIGNED(ipa, 64))
                return -EINVAL;
        if (vcpu->arch.steal.base != GPA_INVALID)
                return -EEXIST;

        /* Check the address is in a valid memslot */
        idx = srcu_read_lock(&kvm->srcu);
        if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
                ret = -EINVAL;
        srcu_read_unlock(&kvm->srcu, idx);

        if (!ret)
                vcpu->arch.steal.base = ipa;

        return ret;
}

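/* KVM_GET_DEVICE_ATTR handler: report the configured stolen-time IPA. */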
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr)
{
        u64 __user *user = (u64 __user *)attr->addr;
        u64 ipa;

        if (!kvm_arm_pvtime_supported() ||
            attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
                return -ENXIO;

        ipa = vcpu->arch.steal.base;

        if (put_user(ipa, user))
                return -EFAULT;
        return 0;
}

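/* KVM_HAS_DEVICE_ATTR handler: only the PVTIME_IPA attribute exists. */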
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PVTIME_IPA:
                if (kvm_arm_pvtime_supported())
                        return 0;
        }

        return -ENXIO;
}
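
/*
 * Userspace usage sketch (illustrative, not part of this file; see
 * Documentation/virt/kvm/devices/vcpu.rst): stolen time is enabled per
 * vCPU through the KVM_ARM_VCPU_PVTIME_CTRL attribute group, where
 * vcpu_fd is the vCPU file descriptor:
 *
 *      u64 ipa = ...;          // 64-byte-aligned guest physical address
 *      struct kvm_device_attr attr = {
 *              .group = KVM_ARM_VCPU_PVTIME_CTRL,
 *              .attr  = KVM_ARM_VCPU_PVTIME_IPA,
 *              .addr  = (u64)&ipa,
 *      };
 *      ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */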