// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "ioremap: " fmt

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/arm-smccc.h>
#include <linux/slab.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/hypervisor.h>
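
/*
 * Fallback definitions for the KVM MMIO guard hypercall function IDs,
 * used when the arm-smccc.h header in this tree does not provide them.
 * The function numbers (5-8) sit in the vendor-specific hypervisor
 * service range and match the IDs used by the calls below.
 */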
#ifndef ARM_SMCCC_KVM_FUNC_MMIO_GUARD_INFO
#define ARM_SMCCC_KVM_FUNC_MMIO_GUARD_INFO	5
#define ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_INFO_FUNC_ID		\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,			\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   ARM_SMCCC_KVM_FUNC_MMIO_GUARD_INFO)
#endif	/* ARM_SMCCC_KVM_FUNC_MMIO_GUARD_INFO */

#ifndef ARM_SMCCC_KVM_FUNC_MMIO_GUARD_ENROLL
#define ARM_SMCCC_KVM_FUNC_MMIO_GUARD_ENROLL	6
#define ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_ENROLL_FUNC_ID		\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,			\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   ARM_SMCCC_KVM_FUNC_MMIO_GUARD_ENROLL)
#endif	/* ARM_SMCCC_KVM_FUNC_MMIO_GUARD_ENROLL */

#ifndef ARM_SMCCC_KVM_FUNC_MMIO_GUARD_MAP
#define ARM_SMCCC_KVM_FUNC_MMIO_GUARD_MAP	7
#define ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID			\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,			\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   ARM_SMCCC_KVM_FUNC_MMIO_GUARD_MAP)
#endif	/* ARM_SMCCC_KVM_FUNC_MMIO_GUARD_MAP */

#ifndef ARM_SMCCC_KVM_FUNC_MMIO_GUARD_UNMAP
#define ARM_SMCCC_KVM_FUNC_MMIO_GUARD_UNMAP	8
#define ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_UNMAP_FUNC_ID		\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,			\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   ARM_SMCCC_KVM_FUNC_MMIO_GUARD_UNMAP)
#endif	/* ARM_SMCCC_KVM_FUNC_MMIO_GUARD_UNMAP */
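
/*
 * Per-granule reference count for guarded MMIO mappings. Entries live in
 * ioremap_guard_array, indexed by the physical address shifted right by the
 * guard granule, and are only manipulated under ioremap_guard_lock.
 */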
struct ioremap_guard_ref {
	refcount_t	count;
};

static DEFINE_STATIC_KEY_FALSE(ioremap_guard_key);
static DEFINE_XARRAY(ioremap_guard_array);
static DEFINE_MUTEX(ioremap_guard_lock);
static size_t guard_granule;
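
/* The MMIO guard is opt-in via the "ioremap_guard" kernel command line parameter. */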
static bool ioremap_guard;

static int __init ioremap_guard_setup(char *str)
{
	ioremap_guard = true;

	return 0;
}
early_param("ioremap_guard", ioremap_guard_setup);
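
/*
 * Probe and enroll into the KVM MMIO guard service: query the guard granule
 * with the INFO hypercall, check that it is a power of two no larger than
 * PAGE_SIZE, and, if ENROLL succeeds, enable the static key that activates
 * the ioremap/iounmap hooks below.
 */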
void kvm_init_ioremap_services(void)
{
	struct arm_smccc_res res;
	size_t granule;

	if (!ioremap_guard)
		return;

	/* We need all the functions to be implemented */
	if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_INFO) ||
	    !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_ENROLL) ||
	    !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_MAP) ||
	    !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_UNMAP))
		return;

	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_INFO_FUNC_ID,
			     0, 0, 0, &res);
	granule = res.a0;
	if (granule > PAGE_SIZE || !granule || (granule & (granule - 1))) {
		pr_warn("KVM MMIO guard initialization failed: "
			"guard granule (%lu), page size (%lu)\n",
			granule, PAGE_SIZE);
		return;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_ENROLL_FUNC_ID,
			     &res);
	if (res.a0 == SMCCC_RET_SUCCESS) {
		guard_granule = granule;
		static_branch_enable(&ioremap_guard_key);
		pr_info("Using KVM MMIO guard for ioremap\n");
	} else {
		pr_warn("KVM MMIO guard registration failed (%ld)\n", res.a0);
	}
}
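
/*
 * Hook invoked on the ioremap path: for each guard granule in
 * [phys_addr, phys_addr + size), skip pfns that are valid RAM, take an
 * extra reference if the granule is already tracked, otherwise record it
 * in the xarray and ask the hypervisor to map it via the MAP hypercall.
 */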
void ioremap_phys_range_hook(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	int guard_shift;

	if (!static_branch_unlikely(&ioremap_guard_key))
		return;

	guard_shift = __builtin_ctzl(guard_granule);

	mutex_lock(&ioremap_guard_lock);

	while (size) {
		u64 guard_fn = phys_addr >> guard_shift;
		struct ioremap_guard_ref *ref;
		struct arm_smccc_res res;

		if (pfn_valid(__phys_to_pfn(phys_addr)))
			goto next;

		ref = xa_load(&ioremap_guard_array, guard_fn);
		if (ref) {
			refcount_inc(&ref->count);
			goto next;
		}

		/*
		 * It is acceptable for the allocation to fail, especially
		 * if trying to ioremap something very early on, like with
		 * earlycon, which happens long before kmem_cache_init.
		 * This page will be permanently accessible, similar to a
		 * saturated refcount.
		 */
		if (slab_is_available())
			ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (ref) {
			refcount_set(&ref->count, 1);
			if (xa_err(xa_store(&ioremap_guard_array, guard_fn, ref,
					    GFP_KERNEL))) {
				kfree(ref);
				ref = NULL;
			}
		}

		arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID,
				  phys_addr, prot, &res);
		if (res.a0 != SMCCC_RET_SUCCESS) {
			pr_warn_ratelimited("Failed to register %llx\n",
					    phys_addr);
			xa_erase(&ioremap_guard_array, guard_fn);
			kfree(ref);
			goto out;
		}

next:
		size -= guard_granule;
		phys_addr += guard_granule;
	}
out:
	mutex_unlock(&ioremap_guard_lock);
}
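
/*
 * Hook invoked on the iounmap path: drop one reference per guard granule
 * and, once the count reaches zero, remove the xarray entry and tell the
 * hypervisor to tear the granule down via the UNMAP hypercall.
 */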
void iounmap_phys_range_hook(phys_addr_t phys_addr, size_t size)
{
	int guard_shift;

	if (!static_branch_unlikely(&ioremap_guard_key))
		return;

	VM_BUG_ON(phys_addr & ~PAGE_MASK || size & ~PAGE_MASK);

	guard_shift = __builtin_ctzl(guard_granule);

	mutex_lock(&ioremap_guard_lock);

	while (size) {
		u64 guard_fn = phys_addr >> guard_shift;
		struct ioremap_guard_ref *ref;
		struct arm_smccc_res res;

		ref = xa_load(&ioremap_guard_array, guard_fn);
		if (!ref) {
			pr_warn_ratelimited("%llx not tracked, left mapped\n",
					    phys_addr);
			goto next;
		}

		if (!refcount_dec_and_test(&ref->count))
			goto next;

		xa_erase(&ioremap_guard_array, guard_fn);
		kfree(ref);

		arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_UNMAP_FUNC_ID,
				  phys_addr, &res);
		if (res.a0 != SMCCC_RET_SUCCESS) {
			pr_warn_ratelimited("Failed to unregister %llx\n",
					    phys_addr);
			goto out;
		}

next:
		size -= guard_granule;
		phys_addr += guard_granule;
	}
out:
	mutex_unlock(&ioremap_guard_lock);
}
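
/*
 * Reject ioremap() of anything beyond PHYS_MASK and of memory that is
 * already mapped as RAM.
 */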
bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot)
{
	unsigned long last_addr = phys_addr + size - 1;

	/* Don't allow outside PHYS_MASK */
	if (last_addr & ~PHYS_MASK)
		return false;

	/* Don't allow RAM to be mapped. */
	if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
		return false;

	return true;
}
/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}
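
/*
 * memremap() may only treat a region as RAM if the underlying pfn is
 * covered by the linear map.
 */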
bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
				 unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	return pfn_is_map_memory(pfn);
}