pgd.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * PGD allocation/freeing
  4. *
  5. * Copyright (C) 2012 ARM Ltd.
  6. * Author: Catalin Marinas <[email protected]>
  7. */
  8. #include <linux/mm.h>
  9. #include <linux/gfp.h>
  10. #include <linux/highmem.h>
  11. #include <linux/slab.h>
  12. #include <asm/pgalloc.h>
  13. #include <asm/page.h>
  14. #include <asm/tlbflush.h>
  15. #ifdef CONFIG_RKP
  16. #include <linux/rkp.h>
  17. #endif
/*
 * Slab cache used by pgd_alloc()/pgd_free() when PGD_SIZE < PAGE_SIZE;
 * created in pgtable_cache_init(). Stays NULL when a pgd is a full page
 * (the page allocator is used directly in that case).
 */
static struct kmem_cache *pgd_cache __ro_after_init;
  19. pgd_t *pgd_alloc(struct mm_struct *mm)
  20. {
  21. gfp_t gfp = GFP_PGTABLE_USER;
  22. #ifdef CONFIG_RKP
  23. pgd_t *ret = NULL;
  24. ret = (pgd_t *) rkp_ro_alloc();
  25. if (!ret) {
  26. if (PGD_SIZE == PAGE_SIZE)
  27. ret = (pgd_t *)__get_free_page(gfp);
  28. else
  29. ret = kmem_cache_alloc(pgd_cache, gfp);
  30. }
  31. if (unlikely(!ret)) {
  32. pr_warn("%s: pgd alloc is failed\n", __func__);
  33. return ret;
  34. }
  35. if (rkp_started)
  36. uh_call(UH_APP_RKP, RKP_PGD_RO, (u64)ret, 0, 0, 0);
  37. return ret;
  38. #else
  39. if (PGD_SIZE == PAGE_SIZE)
  40. return (pgd_t *)__get_free_page(gfp);
  41. else
  42. return kmem_cache_alloc(pgd_cache, gfp);
  43. #endif
  44. }
  45. void pgd_free(struct mm_struct *mm, pgd_t *pgd)
  46. {
  47. #ifdef CONFIG_RKP
  48. if (rkp_started)
  49. uh_call(UH_APP_RKP, RKP_PGD_RW, (u64)pgd, 0, 0, 0);
  50. /* if pgd memory come from read only buffer, the put it back */
  51. if (is_rkp_ro_buffer((u64)pgd)) {
  52. rkp_ro_free((void *)pgd);
  53. } else {
  54. if (PGD_SIZE == PAGE_SIZE)
  55. free_page((unsigned long)pgd);
  56. else
  57. kmem_cache_free(pgd_cache, pgd);
  58. }
  59. #else
  60. if (PGD_SIZE == PAGE_SIZE)
  61. free_page((unsigned long)pgd);
  62. else
  63. kmem_cache_free(pgd_cache, pgd);
  64. #endif
  65. }
/*
 * Boot-time setup: create pgd_cache when pgds are smaller than a page.
 * When PGD_SIZE == PAGE_SIZE, pgd_alloc() uses the page allocator
 * directly and no cache is needed.
 */
void __init pgtable_cache_init(void)
{
	if (PGD_SIZE == PAGE_SIZE)
		return;

#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * With 52-bit physical addresses, the architecture requires the
	 * top-level table to be aligned to at least 64 bytes.
	 */
	BUILD_BUG_ON(PGD_SIZE < 64);
#endif

	/*
	 * Naturally aligned pgds required by the architecture: the slab
	 * object alignment is set to PGD_SIZE itself.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
				      SLAB_PANIC, NULL);
}