modules.c

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2022 Google LLC
 */
#include <asm/kvm_host.h>
#include <asm/kvm_pkvm_module.h>

#include <nvhe/mem_protect.h>
#include <nvhe/modules.h>
#include <nvhe/mm.h>
#include <nvhe/serial.h>
#include <nvhe/spinlock.h>
#include <nvhe/trap_handler.h>

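/*
 * Thin wrapper around kvm_flush_dcache_to_poc() with the (void *, size_t)
 * prototype exposed to modules through module_ops below.
 */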
static void __kvm_flush_dcache_to_poc(void *addr, size_t size)
{
        kvm_flush_dcache_to_poc((unsigned long)addr, (unsigned long)size);
}

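/*
 * __update_hcr_el2() / __update_hfgwtr_el2(): apply a set/clear mask to the
 * saved copy in this CPU's kvm_nvhe_init_params, clean the struct to the PoC
 * so the updated copy is not lost to uncached readers, then write the new
 * value to the live system register.
 */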
static void __update_hcr_el2(unsigned long set_mask, unsigned long clear_mask)
{
        struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

        params->hcr_el2 |= set_mask;
        params->hcr_el2 &= ~clear_mask;

        __kvm_flush_dcache_to_poc(params, sizeof(*params));

        write_sysreg(params->hcr_el2, hcr_el2);
}

static void __update_hfgwtr_el2(unsigned long set_mask, unsigned long clear_mask)
{
        struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

        params->hfgwtr_el2 |= set_mask;
        params->hfgwtr_el2 &= ~clear_mask;

        __kvm_flush_dcache_to_poc(params, sizeof(*params));

        write_sysreg_s(params->hfgwtr_el2, SYS_HFGWTR_EL2);
}

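/*
 * Bytes currently mapped through the early linear-map helpers below.
 * __pkvm_close_module_registration() warns if anything is still mapped once
 * module registration closes.
 */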
static atomic_t early_lm_pages;

static void *__pkvm_linear_map_early(phys_addr_t phys, size_t size, enum kvm_pgtable_prot prot)
{
        void *addr = NULL;
        int ret;

        if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
                return NULL;

        addr = __hyp_va(phys);
        ret = pkvm_create_mappings(addr, addr + size, prot);
        if (ret)
                addr = NULL;
        else
                atomic_add(size, &early_lm_pages);

        return addr;
}

static void __pkvm_linear_unmap_early(void *addr, size_t size)
{
        pkvm_remove_mappings(addr, addr + size);
        atomic_sub(size, &early_lm_pages);
}

void __pkvm_close_module_registration(void)
{
        /*
         * Page ownership tracking might go out of sync if there are stale
         * entries in pKVM's linear map range, so they must really be gone by
         * now.
         */
        WARN_ON_ONCE(atomic_read(&early_lm_pages));

        /*
         * Nothing else to do, module loading HVCs are only accessible before
         * deprivilege
         */
}

static int __pkvm_module_host_donate_hyp(u64 pfn, u64 nr_pages)
{
        return ___pkvm_host_donate_hyp(pfn, nr_pages, true);
}

const struct pkvm_module_ops module_ops = {
        .create_private_mapping = __pkvm_create_private_mapping,
        .alloc_module_va = __pkvm_alloc_module_va,
        .map_module_page = __pkvm_map_module_page,
        .register_serial_driver = __pkvm_register_serial_driver,
        .puts = hyp_puts,
        .putx64 = hyp_putx64,
        .fixmap_map = hyp_fixmap_map,
        .fixmap_unmap = hyp_fixmap_unmap,
        .linear_map_early = __pkvm_linear_map_early,
        .linear_unmap_early = __pkvm_linear_unmap_early,
        .flush_dcache_to_poc = __kvm_flush_dcache_to_poc,
        .update_hcr_el2 = __update_hcr_el2,
        .update_hfgwtr_el2 = __update_hfgwtr_el2,
        .register_host_perm_fault_handler = hyp_register_host_perm_fault_handler,
        .host_stage2_mod_prot = module_change_host_page_prot,
        .host_stage2_get_leaf = host_stage2_get_leaf,
        .register_host_smc_handler = __pkvm_register_host_smc_handler,
        .register_default_trap_handler = __pkvm_register_default_trap_handler,
        .register_illegal_abt_notifier = __pkvm_register_illegal_abt_notifier,
        .register_psci_notifier = __pkvm_register_psci_notifier,
        .register_hyp_panic_notifier = __pkvm_register_hyp_panic_notifier,
        .host_donate_hyp = __pkvm_module_host_donate_hyp,
        .hyp_donate_host = __pkvm_hyp_donate_host,
        .host_share_hyp = __pkvm_host_share_hyp,
        .host_unshare_hyp = __pkvm_host_unshare_hyp,
        .pin_shared_mem = hyp_pin_shared_mem,
        .unpin_shared_mem = hyp_unpin_shared_mem,
        .memcpy = memcpy,
        .memset = memset,
        .hyp_pa = hyp_virt_to_phys,
        .hyp_va = hyp_phys_to_virt,
        .kern_hyp_va = __kern_hyp_va,
        .host_stage2_mod_prot_range = module_change_host_page_prot_range,
};

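/*
 * Run a loaded module's init function, passing it the module_ops table
 * above. module_init is a hyp VA pointing at a function with the
 * int (*)(const struct pkvm_module_ops *) prototype.
 */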
int __pkvm_init_module(void *module_init)
{
        int (*do_module_init)(const struct pkvm_module_ops *ops) = module_init;

        return do_module_init(&module_ops);
}

#define MAX_DYNAMIC_HCALLS 128

atomic_t num_dynamic_hcalls = ATOMIC_INIT(0);
DEFINE_HYP_SPINLOCK(dyn_hcall_lock);

static dyn_hcall_t host_dynamic_hcalls[MAX_DYNAMIC_HCALLS];

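/*
 * Host HVC IDs at or above __KVM_HOST_SMCCC_FUNC___dynamic_hcalls index into
 * host_dynamic_hcalls[]. IDs below that range are left to the static hcall
 * table (HCALL_UNHANDLED); unregistered slots report SMCCC_RET_NOT_SUPPORTED
 * to the host in x0.
 */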
int handle_host_dynamic_hcall(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(unsigned long, id, host_ctxt, 0);
        dyn_hcall_t hfn;
        int dyn_id;

        /*
         * TODO: static key to protect when no dynamic hcall is registered?
         */

        dyn_id = (int)(id - KVM_HOST_SMCCC_ID(0)) -
                 __KVM_HOST_SMCCC_FUNC___dynamic_hcalls;
        if (dyn_id < 0)
                return HCALL_UNHANDLED;

        cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;

        /*
         * Order access to num_dynamic_hcalls and host_dynamic_hcalls. Paired
         * with __pkvm_register_hcall().
         */
        if (dyn_id >= atomic_read_acquire(&num_dynamic_hcalls))
                goto end;

        hfn = READ_ONCE(host_dynamic_hcalls[dyn_id]);
        if (!hfn)
                goto end;

        cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
        hfn(host_ctxt);
end:
        return HCALL_HANDLED;
}

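/*
 * Reserve the next free slot in host_dynamic_hcalls[] for @hvn_hyp_va and
 * return the host-facing hcall ID (the slot index offset by
 * __KVM_HOST_SMCCC_FUNC___dynamic_hcalls), or -ENOMEM once the table is full.
 */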
int __pkvm_register_hcall(unsigned long hvn_hyp_va)
{
        dyn_hcall_t hfn = (void *)hvn_hyp_va;
        int reserved_id, ret;

        assert_in_mod_range(hvn_hyp_va);

        hyp_spin_lock(&dyn_hcall_lock);

        reserved_id = atomic_read(&num_dynamic_hcalls);
        if (reserved_id >= MAX_DYNAMIC_HCALLS) {
                ret = -ENOMEM;
                goto err_hcall_unlock;
        }

        WRITE_ONCE(host_dynamic_hcalls[reserved_id], hfn);

        /*
         * Order access to num_dynamic_hcalls and host_dynamic_hcalls. Paired
         * with handle_host_dynamic_hcall.
         */
        atomic_set_release(&num_dynamic_hcalls, reserved_id + 1);

        ret = reserved_id + __KVM_HOST_SMCCC_FUNC___dynamic_hcalls;
err_hcall_unlock:
        hyp_spin_unlock(&dyn_hcall_lock);

        return ret;
}