page-states.c 4.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright IBM Corp. 2008
  4. *
  5. * Guest page hinting for unused pages.
  6. *
  7. * Author(s): Martin Schwidefsky <[email protected]>
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/errno.h>
  11. #include <linux/types.h>
  12. #include <linux/mm.h>
  13. #include <linux/memblock.h>
  14. #include <linux/gfp.h>
  15. #include <linux/init.h>
  16. #include <asm/asm-extable.h>
  17. #include <asm/facility.h>
  18. #include <asm/page-states.h>
  19. static int cmma_flag = 1;
  20. static int __init cmma(char *str)
  21. {
  22. bool enabled;
  23. if (!kstrtobool(str, &enabled))
  24. cmma_flag = enabled;
  25. return 1;
  26. }
  27. __setup("cmma=", cmma);
/*
 * Probe for the ESSA instruction (opcode 0xb9ab) by executing an
 * ESSA_GET_STATE request under an exception-table fixup.
 *
 * If ESSA executes, the following "la %[rc],0" clears rc; if it raises
 * a program check, the extable fixup resumes at label 1 and rc keeps
 * its initial -EOPNOTSUPP. (NOTE(review): the extable entry covers
 * label 0, presumably because the s390 program-check old PSW points
 * past the faulting instruction — confirm against asm-extable.h.)
 *
 * Returns: 0 if ESSA is available, -EOPNOTSUPP otherwise.
 */
static inline int cmma_test_essa(void)
{
	unsigned long tmp = 0;
	int rc = -EOPNOTSUPP;

	/* test ESSA_GET_STATE */
	asm volatile(
		" .insn rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"0: la %[rc],0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: [rc] "+&d" (rc), [tmp] "+&d" (tmp)
		: [cmd] "i" (ESSA_GET_STATE));
	return rc;
}
  42. void __init cmma_init(void)
  43. {
  44. if (!cmma_flag)
  45. return;
  46. if (cmma_test_essa()) {
  47. cmma_flag = 0;
  48. return;
  49. }
  50. if (test_facility(147))
  51. cmma_flag = 2;
  52. }
/*
 * Read the current ESSA state of @page via ESSA_GET_STATE.
 * Only the low six bits of the returned register hold the state.
 */
static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile(" .insn rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}
/*
 * Issue ESSA_SET_UNUSED for each of the 2^order pages starting at
 * @page, hinting to the hypervisor that their contents may be
 * discarded. rc only absorbs the instruction's result register;
 * the value is intentionally ignored.
 */
static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}
/*
 * Issue ESSA_SET_STABLE for each of the 2^order pages starting at
 * @page, marking their contents as needed (stable). rc only absorbs
 * the instruction's result register; the value is ignored.
 */
static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}
/*
 * Issue ESSA_SET_STABLE_NODAT for each of the 2^order pages starting
 * at @page: stable state plus the no-dat attribute (requires facility
 * 147, i.e. cmma_flag == 2). rc only absorbs the instruction's result
 * register; the value is ignored.
 */
static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}
/*
 * Walk the pmd entries of @pud covering [addr, end) and set PG_arch_1
 * on the page backing each pte table, so that cmma_init_nodat() knows
 * to skip page-table pages. Empty and large (no pte table) entries
 * are skipped.
 */
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;	/* no pte table behind this entry */
		/* pmd_val() holds the pte table origin */
		page = phys_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}
/*
 * Walk the pud entries of @p4d covering [addr, end), set PG_arch_1 on
 * the pages backing each pmd table, and recurse into mark_kernel_pmd().
 * Four pages are flagged per table — s390 segment/region tables span
 * four consecutive pages. Folded levels have no table of their own and
 * are not flagged.
 */
static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;	/* nothing mapped, or no pmd table */
		if (!pud_folded(*pud)) {
			page = phys_to_page(pud_val(*pud));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
/*
 * Walk the p4d entries of @pgd covering [addr, end), set PG_arch_1 on
 * the four pages backing each pud table (s390 region tables span four
 * pages), and recurse into mark_kernel_pud(). Folded levels are not
 * flagged since they have no table of their own.
 */
static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;	/* nothing mapped here */
		if (!p4d_folded(*p4d)) {
			page = phys_to_page(p4d_val(*p4d));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}
/*
 * Walk the kernel page tables from address 0 up to MODULES_END and set
 * PG_arch_1 on every page that backs a page-table at any level, so
 * cmma_init_nodat() can tell page-table pages apart from ordinary
 * kernel pages. Top-level tables also span four pages each.
 */
static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;	/* nothing mapped here */
		if (!pgd_folded(*pgd)) {
			page = phys_to_page(pgd_val(*pgd));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}
/*
 * Second CMMA init stage, only run in mode 2 (facility 147 present):
 * switch all kernel pages that are neither page-table pages nor free
 * pages to the stable/no-dat state.
 *
 * mark_kernel_pgd() first tags page-table pages with PG_arch_1; the
 * scan below consumes (clears) that tag. Free pages are recognized by
 * a non-empty page->lru — NOTE(review): this assumes pages still on
 * the allocator free lists are linked via lru at this point in boot;
 * confirm against the buddy allocator.
 */
void __init cmma_init_nodat(void)
{
	struct page *page;
	unsigned long start, end, ix;
	int i;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}
  182. void arch_free_page(struct page *page, int order)
  183. {
  184. if (!cmma_flag)
  185. return;
  186. set_page_unused(page, order);
  187. }
  188. void arch_alloc_page(struct page *page, int order)
  189. {
  190. if (!cmma_flag)
  191. return;
  192. if (cmma_flag < 2)
  193. set_page_stable_dat(page, order);
  194. else
  195. set_page_stable_nodat(page, order);
  196. }
  197. void arch_set_page_dat(struct page *page, int order)
  198. {
  199. if (!cmma_flag)
  200. return;
  201. set_page_stable_dat(page, order);
  202. }