// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2023 - Google LLC
 * Author: Will Deacon <[email protected]>
 */
  6. #include <asm/kvm_host.h>
  7. #include <asm/pgtable.h>
  8. #include <linux/init.h>
  9. #include <linux/jump_label.h>
  10. #include <linux/memblock.h>
  11. DEFINE_STATIC_KEY_FALSE(pkvm_force_nc);
  12. static int __init early_pkvm_force_nc_cfg(char *arg)
  13. {
  14. static_branch_enable(&pkvm_force_nc);
  15. return 0;
  16. }
  17. early_param("kvm-arm.force_nc", early_pkvm_force_nc_cfg);
/*
 * Update the stage-2 memory attributes (cacheability) for a page, usually
 * in response to mapping or unmapping a normal non-cacheable region at
 * stage-1.
 *
 * If 'force_nc' is set, the stage-2 entry is immediately made non-cacheable
 * (and cleaned+invalidated to the PoC); otherwise the entry is unmapped and
 * the cacheability determined based on the stage-1 attribute of the next
 * access (with no cache maintenance being performed).
 */
  27. struct pkvm_host_nc_region {
  28. phys_addr_t start;
  29. phys_addr_t end;
  30. };
  31. #define PKVM_HOST_MAX_EARLY_NC_REGIONS 8
  32. static struct pkvm_host_nc_region
  33. pkvm_host_early_nc_regions[PKVM_HOST_MAX_EARLY_NC_REGIONS];
  34. static void pkvm_host_track_early_nc_mapping(phys_addr_t addr)
  35. {
  36. static int idx /*= 0*/;
  37. struct pkvm_host_nc_region *reg = &pkvm_host_early_nc_regions[idx];
  38. if (reg->start == reg->end) {
  39. reg->start = addr;
  40. } else if (reg->end != addr) {
  41. if (WARN_ON(idx == PKVM_HOST_MAX_EARLY_NC_REGIONS - 1))
  42. return;
  43. reg = &pkvm_host_early_nc_regions[++idx];
  44. reg->start = addr;
  45. }
  46. reg->end = addr + PAGE_SIZE;
  47. }
  48. void pkvm_host_set_stage2_memattr(phys_addr_t addr, bool force_nc)
  49. {
  50. int err;
  51. if (kvm_get_mode() != KVM_MODE_PROTECTED)
  52. return;
  53. /*
  54. * Non-memory regions or carveouts marked as "no-map" are handled
  55. * entirely by their corresponding driver, which should avoid the
  56. * creation of a cacheable alias in the first place.
  57. */
  58. if (!memblock_is_map_memory(addr))
  59. return;
  60. if (!is_pkvm_initialized()) {
  61. if (!WARN_ON_ONCE(!force_nc))
  62. pkvm_host_track_early_nc_mapping(addr);
  63. return;
  64. }
  65. err = kvm_call_hyp_nvhe(__pkvm_host_set_stage2_memattr, addr, force_nc);
  66. WARN_ON(err && err != -EAGAIN);
  67. }
  68. EXPORT_SYMBOL_GPL(pkvm_host_set_stage2_memattr);
  69. int __init pkvm_register_early_nc_mappings(void)
  70. {
  71. int i;
  72. if (!is_pkvm_initialized())
  73. return 0;
  74. for (i = 0; i < PKVM_HOST_MAX_EARLY_NC_REGIONS; ++i) {
  75. struct pkvm_host_nc_region *reg = &pkvm_host_early_nc_regions[i];
  76. if (reg->start == reg->end)
  77. return 0;
  78. while (reg->start != reg->end) {
  79. int err;
  80. err = kvm_call_hyp_nvhe(__pkvm_host_set_stage2_memattr, reg->start, true);
  81. if (err)
  82. return err;
  83. reg->start += PAGE_SIZE;
  84. }
  85. }
  86. return 0;
  87. }