// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 */

#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>

#include <kvm/arm_psci.h>
#include <kvm/arm_hypercalls.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	/*
	 * NOTE: For simplicity, we make VCPU suspend emulation the same
	 * as WFI (Wait-for-interrupt) emulation.
	 *
	 * This means for KVM the wakeup events are interrupts and this
	 * is consistent with the intended use of StateID as described
	 * in section 5.4.1 of the PSCI v0.2 specification (ARM DEN 0022A).
	 *
	 * Further, we also treat power-down requests the same as
	 * stand-by requests, as per section 5.4.2 clause 3 of the PSCI
	 * v0.2 specification (ARM DEN 0022A). This means all suspend
	 * states for KVM will preserve the register state.
	 */
	kvm_vcpu_wfi(vcpu);

	return PSCI_RET_SUCCESS;
}
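
/*
 * PSCI CPU_ON: validate the target MPIDR and, if the target vCPU is
 * currently stopped, stage a reset (entry point, context ID and
 * endianness taken from the caller) and mark it runnable.
 */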
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct vcpu_reset_state *reset_state;
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL;
	int ret = PSCI_RET_SUCCESS;
	unsigned long cpu_id;

	cpu_id = smccc_get_arg1(source_vcpu);
	if (!kvm_psci_valid_affinity(source_vcpu, cpu_id))
		return PSCI_RET_INVALID_PARAMS;

	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

	/*
	 * Make sure the caller requested a valid CPU and that the CPU is
	 * turned off.
	 */
	if (!vcpu)
		return PSCI_RET_INVALID_PARAMS;

	spin_lock(&vcpu->arch.mp_state_lock);
	if (!kvm_arm_vcpu_stopped(vcpu)) {
		if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
			ret = PSCI_RET_ALREADY_ON;
		else
			ret = PSCI_RET_INVALID_PARAMS;

		goto out_unlock;
	}

	reset_state = &vcpu->arch.reset_state;

	reset_state->pc = smccc_get_arg2(source_vcpu);

	/* Propagate caller endianness */
	reset_state->be = kvm_vcpu_is_be(source_vcpu);

	/*
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
	 * the general purpose registers are undefined upon CPU_ON.
	 */
	reset_state->r0 = smccc_get_arg3(source_vcpu);

	reset_state->reset = true;
	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

	/*
	 * Make sure the reset request is observed if the RUNNABLE mp_state is
	 * observed.
	 */
	smp_wmb();

	vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
	kvm_vcpu_wake_up(vcpu);

out_unlock:
	spin_unlock(&vcpu->arch.mp_state_lock);
	return ret;
}
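
/*
 * PSCI AFFINITY_INFO: report ON if any vCPU matching the target
 * affinity is runnable, OFF if all matching vCPUs are stopped, and
 * INVALID_PARAMS if no vCPU matches.
 */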
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
	int matching_cpus = 0;
	unsigned long i, mpidr;
	unsigned long target_affinity;
	unsigned long target_affinity_mask;
	unsigned long lowest_affinity_level;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	target_affinity = smccc_get_arg1(vcpu);
	lowest_affinity_level = smccc_get_arg2(vcpu);

	if (!kvm_psci_valid_affinity(vcpu, target_affinity))
		return PSCI_RET_INVALID_PARAMS;

	/* Determine target affinity mask */
	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
	if (!target_affinity_mask)
		return PSCI_RET_INVALID_PARAMS;

	/* Ignore other bits of target affinity */
	target_affinity &= target_affinity_mask;

	/*
	 * If one or more VCPUs matching the target affinity are running,
	 * report ON; otherwise report OFF.
	 */
	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr_aff(tmp);
		if ((mpidr & target_affinity_mask) == target_affinity) {
			matching_cpus++;
			if (!kvm_arm_vcpu_stopped(tmp))
				return PSCI_0_2_AFFINITY_LEVEL_ON;
		}
	}

	if (!matching_cpus)
		return PSCI_RET_INVALID_PARAMS;

	return PSCI_0_2_AFFINITY_LEVEL_OFF;
}

static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	/*
	 * The KVM ABI specifies that a system event exit may call KVM_RUN
	 * again and may perform shutdown/reboot at a later time than when
	 * the actual request is made. Since we are implementing PSCI and a
	 * caller of PSCI reboot and shutdown expects that the system shuts
	 * down or reboots immediately, let's make sure that VCPUs are not run
	 * after this call is handled and before the VCPUs have been
	 * re-initialized.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		spin_lock(&tmp->arch.mp_state_lock);
		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
		spin_unlock(&tmp->arch.mp_state_lock);
	}
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->system_event.ndata = 1;
	vcpu->run->system_event.data[0] = flags;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
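
/* Map the PSCI system-level calls onto KVM system event exits. */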
static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN, 0);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET, 0);
}

static void kvm_psci_system_reset2(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET,
				 KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2);
}
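
/*
 * Unlike shutdown/reset, SYSTEM_SUSPEND leaves the other vCPUs alone and
 * simply exits to userspace, which is expected to read the SMCCC
 * parameters from the vCPU and drive the suspend itself.
 */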
static void kvm_psci_system_suspend(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	memset(&run->system_event, 0, sizeof(vcpu->run->system_event));
	run->system_event.type = KVM_SYSTEM_EVENT_SUSPEND;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
{
	/*
	 * Prevent 32 bit guests from calling 64 bit PSCI functions.
	 */
	if ((fn & PSCI_0_2_64BIT) && vcpu_mode_is_32bit(vcpu))
		return PSCI_RET_NOT_SUPPORTED;

	return 0;
}
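
/*
 * Dispatch a PSCI v0.2 function. Returns 1 to resume the guest with the
 * PSCI result in r0/x0, or 0 to exit to userspace (SYSTEM_OFF and
 * SYSTEM_RESET), in which case r0/x0 is preloaded with INTERNAL_FAILURE.
 */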
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;
	int ret = 1;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Bits[31:16] = Major Version = 0
		 * Bits[15:0] = Minor Version = 2
		 */
		val = KVM_ARM_PSCI_0_2;
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
		val = kvm_psci_vcpu_suspend(vcpu);
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_arm_vcpu_power_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_0_2_FN64_CPU_ON:
		val = kvm_psci_vcpu_on(vcpu);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		/*
		 * Either the Trusted OS is MP, and hence does not require
		 * migration, or no Trusted OS is present at all.
		 */
		val = PSCI_0_2_TOS_MP;
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We shouldn't be going back to the guest VCPU after
		 * receiving a SYSTEM_OFF request.
		 *
		 * If userspace accidentally/deliberately resumes the
		 * guest VCPU after a SYSTEM_OFF request, then the guest
		 * VCPU should see an internal failure as the PSCI return
		 * value. To achieve this, we preload r0 (or x0) with
		 * the PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
		 * with the PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}
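
/*
 * Dispatch a PSCI v1.x function (minor is 0 or 1). Only the v1.x
 * additions are handled here; anything else falls through to the v0.2
 * handler via the default case.
 */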
static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
{
	unsigned long val = PSCI_RET_NOT_SUPPORTED;
	u32 psci_fn = smccc_get_function(vcpu);
	struct kvm *kvm = vcpu->kvm;
	u32 arg;
	int ret = 1;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		val = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1;
		break;
	case PSCI_1_0_FN_PSCI_FEATURES:
		arg = smccc_get_arg1(vcpu);
		val = kvm_psci_check_allowed_function(vcpu, arg);
		if (val)
			break;

		val = PSCI_RET_NOT_SUPPORTED;

		switch (arg) {
		case PSCI_0_2_FN_PSCI_VERSION:
		case PSCI_0_2_FN_CPU_SUSPEND:
		case PSCI_0_2_FN64_CPU_SUSPEND:
		case PSCI_0_2_FN_CPU_OFF:
		case PSCI_0_2_FN_CPU_ON:
		case PSCI_0_2_FN64_CPU_ON:
		case PSCI_0_2_FN_AFFINITY_INFO:
		case PSCI_0_2_FN64_AFFINITY_INFO:
		case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		case PSCI_0_2_FN_SYSTEM_OFF:
		case PSCI_0_2_FN_SYSTEM_RESET:
		case PSCI_1_0_FN_PSCI_FEATURES:
		case ARM_SMCCC_VERSION_FUNC_ID:
			val = 0;
			break;
		case PSCI_1_0_FN_SYSTEM_SUSPEND:
		case PSCI_1_0_FN64_SYSTEM_SUSPEND:
			if (test_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags))
				val = 0;
			break;
		case PSCI_1_1_FN_SYSTEM_RESET2:
		case PSCI_1_1_FN64_SYSTEM_RESET2:
			if (minor >= 1)
				val = 0;
			break;
		}
		break;
	case PSCI_1_0_FN_SYSTEM_SUSPEND:
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_1_0_FN64_SYSTEM_SUSPEND:
		/*
		 * Return directly to userspace without changing the vCPU's
		 * registers. Userspace depends on reading the SMCCC parameters
		 * to implement SYSTEM_SUSPEND.
		 */
		if (test_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags)) {
			kvm_psci_system_suspend(vcpu);
			return 0;
		}
		break;
	case PSCI_1_1_FN_SYSTEM_RESET2:
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_1_1_FN64_SYSTEM_RESET2:
		if (minor >= 1) {
			arg = smccc_get_arg1(vcpu);

			if (arg <= PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET ||
			    arg >= PSCI_1_1_RESET_TYPE_VENDOR_START) {
				kvm_psci_system_reset2(vcpu);
				vcpu_set_reg(vcpu, 0, PSCI_RET_INTERNAL_FAILURE);
				return 0;
			}

			val = PSCI_RET_INVALID_PARAMS;
			break;
		}
		break;
	default:
		return kvm_psci_0_2_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}
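
/*
 * PSCI v0.1 only implements the KVM-private CPU_OFF/CPU_ON function IDs;
 * everything else is NOT_SUPPORTED.
 */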
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_arm_vcpu_power_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		val = kvm_psci_vcpu_on(vcpu);
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 *   -EINVAL: Unrecognized PSCI function
 */
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;

	val = kvm_psci_check_allowed_function(vcpu, psci_fn);
	if (val) {
		smccc_set_retval(vcpu, val, 0, 0, 0);
		return 1;
	}

	switch (kvm_psci_version(vcpu)) {
	case KVM_ARM_PSCI_1_1:
		return kvm_psci_1_x_call(vcpu, 1);
	case KVM_ARM_PSCI_1_0:
		return kvm_psci_1_x_call(vcpu, 0);
	case KVM_ARM_PSCI_0_2:
		return kvm_psci_0_2_call(vcpu);
	case KVM_ARM_PSCI_0_1:
		return kvm_psci_0_1_call(vcpu);
	default:
		return -EINVAL;
	}
}