// SPDX-License-Identifier: GPL-2.0
/*
 * Handling Page Tables through page fragments
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
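
/*
 * Called with the per-mm fragment cursor when the mm is torn down: drop
 * the references held for fragments that were never handed out, so the
 * backing page is freed once every in-use fragment has been released.
 * The cursor's offset within the page tells how many fragments were
 * consumed.
 */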
void pte_frag_destroy(void *pte_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_pte_page_dtor(page);
		__free_page(page);
	}
}
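
/*
 * Fast path: carve the next fragment out of the page cached in the mm
 * context. Returns NULL when the cache is empty, or when fragments are
 * not in use (PTE_FRAG_NR == 1), and a fresh page must be allocated.
 */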
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	if (PTE_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = pte_frag_get(&mm->context);
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PTE page
		 * NULL so the next allocation falls back to a fresh page.
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		pte_frag_set(&mm->context, pte_frag);
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}
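
/*
 * Slow path: allocate a fresh page, take one fragment from it for the
 * caller and, unless another thread populated the cache first, stash the
 * remaining fragments in the mm context for later fast-path allocations.
 */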
static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_pte_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If the cache was populated while we were allocating, return the
	 * page with a single fragment count; otherwise install the
	 * remaining fragments for later get_pte_from_cache() calls.
	 */
	if (likely(!pte_frag_get(&mm->context))) {
		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}
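
/*
 * Allocate one PTE fragment: try the per-mm cache first, fall back to
 * allocating a new page.
 */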
pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}
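
/*
 * Release one PTE fragment. The backing page is freed only when its last
 * outstanding fragment reference is dropped; reserved pages (e.g. early
 * boot page tables) are handed back via free_reserved_page() instead.
 */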
void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (PageReserved(page))
		return free_reserved_page(page);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		if (!kernel)
			pgtable_pte_page_dtor(page);
		__free_page(page);
	}
}