mm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <[email protected]>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/modules.h>
#include <nvhe/spinlock.h>

struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;

struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

static u64 __private_range_base;
static u64 __private_range_cur;

struct hyp_fixmap_slot {
	u64 addr;
	kvm_pte_t *ptep;
};
static DEFINE_PER_CPU(struct hyp_fixmap_slot, fixmap_slots);

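/*
 * Map @size bytes of physical memory at @phys to the hyp VA @start with @prot,
 * taking the hyp page-table lock around the walk.
 */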
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
				  unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return err;
}

/**
 * pkvm_alloc_private_va_range - Allocates a private VA range.
 * @size:	The size of the VA range to reserve.
 * @haddr:	The hypervisor virtual start address of the allocation.
 *
 * The private virtual address (VA) range is allocated above
 * __private_range_base and aligned based on the order of @size.
 *
 * Return: 0 on success or negative error code on failure.
 */
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
	unsigned long cur, addr;
	int ret = 0;

	hyp_spin_lock(&pkvm_pgd_lock);

	/* Align the allocation based on the order of its size */
	addr = ALIGN(__private_range_cur, PAGE_SIZE << get_order(size));

	/* The allocated size is always a multiple of PAGE_SIZE */
	cur = addr + PAGE_ALIGN(size);

	/* Has the private range grown too large? */
	if (!addr || cur > __hyp_vmemmap ||
	    (cur - __private_range_base) > __PKVM_PRIVATE_SZ) {
		ret = -ENOMEM;
	} else {
		__private_range_cur = cur;
		*haddr = addr;
	}

	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

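/*
 * Reserve a private VA range large enough for @size bytes plus the offset of
 * @phys within its page, map @phys there with @prot, and return the resulting
 * hyp VA through @haddr. Note that the reserved VA range is not returned to
 * the allocator if the mapping itself fails.
 */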
int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
				  enum kvm_pgtable_prot prot,
				  unsigned long *haddr)
{
	unsigned long addr;
	int err;

	size = PAGE_ALIGN(size + offset_in_page(phys));
	err = pkvm_alloc_private_va_range(size, &addr);
	if (err)
		return err;

	err = __pkvm_create_mappings(addr, size, phys, prot);
	if (err)
		return err;

	*haddr = addr + offset_in_page(phys);
	return err;
}

#ifdef CONFIG_NVHE_EL2_DEBUG
static unsigned long mod_range_start = ULONG_MAX;
static unsigned long mod_range_end;
static DEFINE_HYP_SPINLOCK(mod_range_lock);

static void update_mod_range(unsigned long addr, size_t size)
{
	hyp_spin_lock(&mod_range_lock);
	mod_range_start = min(mod_range_start, addr);
	mod_range_end = max(mod_range_end, addr + size);
	hyp_spin_unlock(&mod_range_lock);
}

void assert_in_mod_range(unsigned long addr)
{
	/*
	 * This is not entirely watertight if there are private range
	 * allocations between modules being loaded, but in practice those are
	 * likely to be allocations initiated by the modules themselves.
	 */
	hyp_spin_lock(&mod_range_lock);
	WARN_ON(addr < mod_range_start || mod_range_end <= addr);
	hyp_spin_unlock(&mod_range_lock);
}
#else
static inline void update_mod_range(unsigned long addr, size_t size) { }
#endif

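/*
 * Allocate a private VA range of @nr_pages pages for a hypervisor module and
 * record it so that assert_in_mod_range() can later sanity-check module
 * mappings (debug builds only). Returns NULL if no VA space is available.
 */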
void *__pkvm_alloc_module_va(u64 nr_pages)
{
	size_t size = nr_pages << PAGE_SHIFT;
	unsigned long addr = 0;

	if (!pkvm_alloc_private_va_range(size, &addr))
		update_mod_range(addr, size);

	return (void *)addr;
}

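/*
 * Map a single module page at @va. Unless the page is already protected, it
 * is first donated from the host to the hypervisor, and the donation is
 * rolled back if creating the mapping fails.
 */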
int __pkvm_map_module_page(u64 pfn, void *va, enum kvm_pgtable_prot prot, bool is_protected)
{
	unsigned long addr = (unsigned long)va;
	int ret;

	assert_in_mod_range(addr);

	if (!is_protected) {
		ret = __pkvm_host_donate_hyp(pfn, 1);
		if (ret)
			return ret;
	}

	ret = __pkvm_create_mappings(addr, PAGE_SIZE, hyp_pfn_to_phys(pfn), prot);
	if (ret && !is_protected)
		WARN_ON(__pkvm_hyp_donate_host(pfn, 1));

	return ret;
}

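/*
 * Tear down a module page mapping: give the page back to the host and remove
 * the hyp stage-1 mapping at @va.
 */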
void __pkvm_unmap_module_page(u64 pfn, void *va)
{
	WARN_ON(__pkvm_hyp_donate_host(pfn, 1));
	pkvm_remove_mappings(va, va + PAGE_SIZE);
}

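/*
 * Map the [from, to) range of hyp VAs page by page, deriving each physical
 * address from the VA itself. The caller must hold pkvm_pgd_lock.
 */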
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	unsigned long virt_addr;
	phys_addr_t phys;

	hyp_assert_lock_held(&pkvm_pgd_lock);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys = hyp_virt_to_phys((void *)virt_addr);
		err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
					  phys, prot);
		if (err)
			return err;
	}

	return 0;
}

int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	ret = pkvm_create_mappings_locked(from, to, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

void pkvm_remove_mappings(void *from, void *to)
{
	unsigned long size = (unsigned long)to - (unsigned long)from;

	hyp_spin_lock(&pkvm_pgd_lock);
	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, (u64)from, size) != size);
	hyp_spin_unlock(&pkvm_pgd_lock);
}

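/*
 * Back the hyp vmemmap (the array of struct hyp_page) with the pages starting
 * at physical address @back, one contiguous chunk per hyp memblock, and zero
 * the freshly mapped backing pages.
 */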
int hyp_back_vmemmap(phys_addr_t back)
{
	unsigned long i, start, size, end = 0;
	int ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		start = hyp_memory[i].base;
		start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
		/*
		 * The beginning of the hyp_vmemmap region for the current
		 * memblock may already be backed by the page backing the end
		 * of the previous region, so avoid mapping it twice.
		 */
		start = max(start, end);

		end = hyp_memory[i].base + hyp_memory[i].size;
		end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
		if (start >= end)
			continue;

		size = end - start;
		ret = __pkvm_create_mappings(start, size, back, PAGE_HYP);
		if (ret)
			return ret;

		memset(hyp_phys_to_virt(back), 0, size);
		back += size;
	}

	return 0;
}

static void *__hyp_bp_vect_base;

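/*
 * Select the EL2 exception vector for this CPU according to the Spectre
 * mitigation @slot and publish it in the per-CPU kvm_hyp_vector pointer.
 */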
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
	void *vector;

	switch (slot) {
	case HYP_VECTOR_DIRECT: {
		vector = __kvm_hyp_vector;
		break;
	}
	case HYP_VECTOR_SPECTRE_DIRECT: {
		vector = __bp_harden_hyp_vecs;
		break;
	}
	case HYP_VECTOR_INDIRECT:
	case HYP_VECTOR_SPECTRE_INDIRECT: {
		vector = (void *)__hyp_bp_vect_base;
		break;
	}
	default:
		return -EINVAL;
	}

	vector = __kvm_vector_slot2addr(vector, slot);
	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

	return 0;
}

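/*
 * Make the branch-predictor hardening vectors reachable from hyp. When the
 * system needs idmapped vectors, they are mapped executable into the private
 * VA range; otherwise the existing hyp alias of __bp_harden_hyp_vecs is used
 * directly.
 */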
int hyp_map_vectors(void)
{
	phys_addr_t phys;
	unsigned long bp_base;
	int ret;

	if (!kvm_system_needs_idmapped_vectors()) {
		__hyp_bp_vect_base = __bp_harden_hyp_vecs;
		return 0;
	}

	phys = __hyp_pa(__bp_harden_hyp_vecs);
	ret = __pkvm_create_private_mapping(phys, __BP_HARDEN_HYP_VECS_SZ,
					    PAGE_HYP_EXEC, &bp_base);
	if (ret)
		return ret;

	__hyp_bp_vect_base = (void *)bp_base;

	return 0;
}

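/*
 * Map @phys into this CPU's fixmap slot by patching the pre-allocated PTE in
 * place, and return the hyp VA at which the page can be accessed. The slot
 * must be released with hyp_fixmap_unmap() before it can be reused.
 */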
void *hyp_fixmap_map(phys_addr_t phys)
{
	struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
	kvm_pte_t pte, *ptep = slot->ptep;

	pte = *ptep;
	pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID);
	pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID;
	WRITE_ONCE(*ptep, pte);
	dsb(ishst);

	return (void *)slot->addr + offset_in_page(phys);
}

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)

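/*
 * Same as hyp_fixmap_map(), but install the mapping with Normal Non-Cacheable
 * memory attributes (MT_NORMAL_NC).
 */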
void *hyp_fixmap_map_nc(phys_addr_t phys)
{
	struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
	kvm_pte_t pte, *ptep = slot->ptep;

	pte = *ptep;
	pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID);
	pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID |
	       FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, MT_NORMAL_NC);
	WRITE_ONCE(*ptep, pte);
	dsb(ishst);

	return (void *)slot->addr;
}

static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
{
	kvm_pte_t *ptep = slot->ptep;
	u64 addr = slot->addr;

	/* Zap the memory type too. MT_NORMAL is 0, so the fixmap is cacheable by default. */
	WRITE_ONCE(*ptep, *ptep & ~(KVM_PTE_VALID | KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX));

	/*
	 * Irritatingly, the architecture requires that we use inner-shareable
	 * broadcast TLB invalidation here in case another CPU speculates
	 * through our fixmap and decides to create an "amalgamation of the
	 * values held in the TLB" due to the apparent lack of a
	 * break-before-make sequence.
	 *
	 * https://lore.kernel.org/kvm/[email protected]/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
	 */
	dsb(ishst);
	__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), (KVM_PGTABLE_MAX_LEVELS - 1));
	dsb(ish);
	isb();
}

void hyp_fixmap_unmap(void)
{
	fixmap_clear_slot(this_cpu_ptr(&fixmap_slots));
}

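/*
 * Page-table walker callback used at boot to capture the leaf PTE backing
 * each CPU's fixmap slot. The walker argument carries the CPU number.
 */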
static int __create_fixmap_slot_cb(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
				   enum kvm_pgtable_walk_flags flag,
				   void * const arg)
{
	struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)arg);

	if (!kvm_pte_valid(*ptep) || level != KVM_PGTABLE_MAX_LEVELS - 1)
		return -EINVAL;

	slot->addr = addr;
	slot->ptep = ptep;

	/*
	 * Clear the PTE, but keep the page-table page refcount elevated to
	 * prevent it from ever being freed. This lets us manipulate the PTEs
	 * by hand safely without ever needing to allocate memory.
	 */
	fixmap_clear_slot(slot);

	return 0;
}

static int create_fixmap_slot(u64 addr, u64 cpu)
{
	struct kvm_pgtable_walker walker = {
		.cb	= __create_fixmap_slot_cb,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= (void *)cpu,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
}

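/*
 * Set up one fixmap slot per CPU: reserve a private VA page, map a page there
 * so that the last-level page-table exists, then record and clear its leaf
 * PTE for later use by hyp_fixmap_map().
 */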
int hyp_create_pcpu_fixmap(void)
{
	unsigned long addr, i;
	int ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr);
		if (ret)
			return ret;

		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
					  __hyp_pa(__hyp_bss_start), PAGE_HYP);
		if (ret)
			return ret;

		ret = create_fixmap_slot(addr, i);
		if (ret)
			return ret;
	}

	return 0;
}

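/*
 * Create the identity mapping for the hyp idmap text, and derive the base of
 * the private VA range and the hyp vmemmap from it (see the layout comment
 * below).
 */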
int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);

	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half of the VA
	 * space contains the trampoline page, and needs some care. Split that
	 * second half in two and find the quarter of VA space not conflicting
	 * with the idmap to place the IOs and the vmemmap. IOs use the lower
	 * half of the quarter and the vmemmap the upper half.
	 */
	__private_range_base = start & BIT(hyp_va_bits - 2);
	__private_range_base ^= BIT(hyp_va_bits - 2);
	__private_range_cur = __private_range_base;
	__hyp_vmemmap = __private_range_base | BIT(hyp_va_bits - 3);

	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}

static void *admit_host_page(void *arg)
{
	struct kvm_hyp_memcache *host_mc = arg;

	if (!host_mc->nr_pages)
		return NULL;

	/*
	 * The host still owns the pages in its memcache, so we need to go
	 * through a full host-to-hyp donation cycle to change it. Fortunately,
	 * __pkvm_host_donate_hyp() takes care of races for us, so if it
	 * succeeds we're good to go.
	 */
	if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
		return NULL;

	return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
}

/* Refill our local memcache by popping pages from the one provided by the host. */
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
		    struct kvm_hyp_memcache *host_mc)
{
	struct kvm_hyp_memcache tmp = *host_mc;
	int ret;

	ret = __topup_hyp_memcache(mc, min_pages, admit_host_page,
				   hyp_virt_to_phys, &tmp);
	*host_mc = tmp;

	return ret;
}