// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <[email protected]>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/gfp.h>
#include <nvhe/iommu.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/serial.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;
phys_addr_t pvmfw_base;
phys_addr_t pvmfw_size;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;
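
/*
 * Carve the reservations for the vmemmap, the VM table, the hyp stage-1
 * and host stage-2 page-tables and the FF-A proxy buffers out of the
 * memory pool donated by the host, using the early allocator.
 */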
static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long nr_pages;

	hyp_early_alloc_init(virt, size);

	nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_vm_table_pages();
	vm_table_base = hyp_early_alloc_contig(nr_pages);
	if (!vm_table_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	nr_pages = hyp_ffa_proxy_pages();
	ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
	if (!ffa_proxy_pages)
		return -ENOMEM;

	return 0;
}
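
/* Map each CPU's host FP/SIMD state buffer into the hypervisor stage-1. */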
static int create_hyp_host_fp_mappings(void)
{
	void *start, *end;
	int ret, i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		start = (void *)kern_hyp_va(kvm_arm_hyp_host_fp_state[i]);
		end = start + PAGE_ALIGN(pkvm_host_fp_state_size());
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;
	}

	return 0;
}
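
/*
 * Rebuild the hypervisor stage-1 from scratch: idmap, vectors, vmemmap,
 * the hyp text/data/rodata/bss sections, the donated memory pool, and a
 * private per-CPU stack with an unbacked guard page below it.
 */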
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	enum kvm_pgtable_prot prot;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_data_start, __hyp_data_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);
		unsigned long hyp_addr;

		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		/*
		 * Allocate a contiguous HYP private VA range for the stack
		 * and guard page. The allocation is also aligned based on
		 * the order of its size.
		 */
		ret = pkvm_alloc_private_va_range(NVHE_STACK_SIZE * 2, &hyp_addr);
		if (ret)
			return ret;

		/*
		 * Since the stack grows downwards, map the stack to the page
		 * at the higher address and leave the lower guard page
		 * unbacked.
		 *
		 * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
		 * and addresses corresponding to the guard page have the
		 * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
		 */
		hyp_spin_lock(&pkvm_pgd_lock);
		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + NVHE_STACK_SIZE,
					  NVHE_STACK_SIZE, params->stack_pa, PAGE_HYP);
		hyp_spin_unlock(&pkvm_pgd_lock);
		if (ret)
			return ret;

		/* Update stack_hyp_va to end of the stack's private VA range */
		params->stack_hyp_va = hyp_addr + (2 * NVHE_STACK_SIZE);
	}

	ret = create_hyp_host_fp_mappings();
	if (ret)
		return ret;

	/*
	 * Map the host sections RO in the hypervisor, but transfer the
	 * ownership from the host to the hypervisor itself to make sure they
	 * can't be donated or shared with another entity.
	 *
	 * The ownership transition requires matching changes in the host
	 * stage-2. This will be done later (see fix_host_ownership()) once
	 * the hyp_vmemmap is addressable.
	 */
	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
	ret = pkvm_create_mappings(&kvm_vgic_global_state,
				   &kvm_vgic_global_state + 1, prot);
	if (ret)
		return ret;

	start = hyp_phys_to_virt(pvmfw_base);
	end = start + pvmfw_size;
	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_OWNED);
	ret = pkvm_create_mappings(start, end, prot);
	if (ret)
		return ret;

	return 0;
}
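
/*
 * Point every CPU's init parameters at the new hyp PGD, and clean them to
 * the PoC so that CPUs entering the hypervisor with caches disabled
 * observe the update.
 */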
static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				       (unsigned long)params + sizeof(*params));
	}
}
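
/* Thin wrappers around the hyp buddy allocator, used as page-table mm_ops. */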
static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}
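
/*
 * Walk the hyp stage-1 leaf entries and mirror their page-state into the
 * host stage-2: pages owned by the hypervisor are unmapped from the host,
 * and shared pages are re-annotated with the complementary shared state.
 */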
static int fix_host_ownership_walker(u64 addr, u64 end, u32 level,
				     kvm_pte_t *ptep,
				     enum kvm_pgtable_walk_flags flag,
				     void * const arg)
{
	enum kvm_pgtable_prot prot;
	enum pkvm_page_state state;
	kvm_pte_t pte = *ptep;
	phys_addr_t phys;

	if (!kvm_pte_valid(pte))
		return 0;

	if (level != (KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	phys = kvm_pte_to_phys(pte);
	if (!addr_is_memory(phys))
		return -EINVAL;

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1.
	 */
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
	switch (state) {
	case PKVM_PAGE_OWNED:
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
	case PKVM_PAGE_SHARED_OWNED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
		break;
	default:
		return -EINVAL;
	}

	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot, false);
}

static int fix_hyp_pgtable_refcnt_walker(u64 addr, u64 end, u32 level,
					 kvm_pte_t *ptep,
					 enum kvm_pgtable_walk_flags flag,
					 void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;
	kvm_pte_t pte = *ptep;

	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	if (kvm_pte_valid(pte))
		mm_ops->get_page(ptep);

	return 0;
}
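
/*
 * Take an extra reference on each host stage-2 table page so that the
 * tables stay pinned in place.
 */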
static int pin_table_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			    enum kvm_pgtable_walk_flags flag, void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;
	kvm_pte_t pte = *ptep;

	if (kvm_pte_valid(pte))
		mm_ops->get_page(kvm_pte_follow(pte, mm_ops));

	return 0;
}

static int pin_host_tables(void)
{
	struct kvm_pgtable_walker walker = {
		.cb = pin_table_walker,
		.flags = KVM_PGTABLE_WALK_TABLE_POST,
		.arg = &host_mmu.mm_ops,
	};

	return kvm_pgtable_walk(&host_mmu.pgt, 0, BIT(host_mmu.pgt.ia_bits), &walker);
}
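
/* Apply fix_host_ownership_walker() to the hyp stage-1 mapping of each memblock region. */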
static int fix_host_ownership(void)
{
	struct kvm_pgtable_walker walker = {
		.cb = fix_host_ownership_walker,
		.flags = KVM_PGTABLE_WALK_LEAF,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}
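
/* Walk the whole hyp stage-1 to fix up the refcounts of its table pages. */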
static int fix_hyp_pgtable_refcnt(void)
{
	struct kvm_pgtable_walker walker = {
		.cb = fix_hyp_pgtable_refcnt_walker,
		.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg = pkvm_pgtable.mm_ops,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
				&walker);
}
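
/*
 * Retract host access to the moveable regions marked as protected ranges
 * by transferring their ownership away from the host in its stage-2.
 */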
static int unmap_protected_regions(void)
{
	struct pkvm_moveable_reg *reg;
	int i, ret;

	for (i = 0; i < pkvm_moveable_regs_nr; i++) {
		reg = &pkvm_moveable_regs[i];
		if (reg->type != PKVM_MREG_PROTECTED_RANGE)
			continue;

		ret = host_stage2_set_owner_locked(reg->start, reg->size,
						   PKVM_ID_PROTECTED);
		if (ret)
			return ret;
	}

	return 0;
}
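
/*
 * Second stage of the pKVM bring-up, running on the new page-tables with
 * the vmemmap backed: switch to the buddy allocator, prepare the host
 * stage-2, and fix up ownership, refcount and pinning state before
 * returning to the host.
 */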
void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
	struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = fix_hyp_pgtable_refcnt();
	if (ret)
		goto out;

	ret = hyp_create_pcpu_fixmap();
	if (ret)
		goto out;

	ret = fix_host_ownership();
	if (ret)
		goto out;

	ret = unmap_protected_regions();
	if (ret)
		goto out;

	ret = pin_host_tables();
	if (ret)
		goto out;

	ret = hyp_ffa_init(ffa_proxy_pages);
	if (ret)
		goto out;

	pkvm_hyp_vm_table_init(vm_table_base);
out:
	/*
	 * We tail-called to here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}
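
/*
 * First stage of the pKVM bring-up: divide the memory pool donated by the
 * host, rebuild the hyp stage-1 mappings with the early allocator, then
 * switch to the new page-tables through the idmap and continue in
 * __pkvm_init_finalise().
 */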
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump in the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(__hyp_pa(params), __pkvm_init_finalise);

	unreachable();
}