memtrace.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) IBM Corporation, 2014, 2017
 *
 * Anton Blanchard, Rashmica Gupta.
 */

#define pr_fmt(fmt) "memtrace: " fmt

#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/numa.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>

/* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
	void *mem;
	u64 start;
	u64 size;
	u32 nid;
	struct dentry *dir;
	char name[16];
};

static DEFINE_MUTEX(memtrace_mutex);
static u64 memtrace_size;
static struct memtrace_entry *memtrace_array;
static unsigned int memtrace_array_nr;
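
/*
 * debugfs read handler for the per-node "trace" file: copies the traced
 * region out through the ioremap()ed kernel alias held in ent->mem.
 */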
static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	struct memtrace_entry *ent = filp->private_data;

	return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}
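
/*
 * Map the traced range into userspace. vm_page_prot is made non-cached,
 * matching the cache-inhibited use of the range once it has been removed
 * from the kernel linear mapping.
 */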
static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct memtrace_entry *ent = filp->private_data;

	if (ent->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (vma->vm_pgoff << PAGE_SHIFT >= ent->size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, PHYS_PFN(ent->start) + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

static const struct file_operations memtrace_fops = {
	.llseek = default_llseek,
	.read	= memtrace_read,
	.open	= simple_open,
	.mmap	= memtrace_mmap,
};

#define FLUSH_CHUNK_SIZE SZ_1G

/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		flush_dcache_range(i, min(stop, i + chunk));
		cond_resched();
	}
}
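
/*
 * Zero the allocated range through the linear mapping, then flush it from
 * the data cache so it can subsequently be used as a cache-inhibited
 * trace buffer.
 */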
static void memtrace_clear_range(unsigned long start_pfn,
				 unsigned long nr_pages)
{
	unsigned long pfn;

	/* As HIGHMEM does not apply, use clear_page() directly. */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
			cond_resched();
		clear_page(__va(PFN_PHYS(pfn)));
	}

	/*
	 * Before we go ahead and use this range as a cache-inhibited range,
	 * flush the cache.
	 */
	flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
				   (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
				   FLUSH_CHUNK_SIZE);
}
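
/*
 * Allocate a contiguous, size-aligned chunk on @nid, zero it, mark its
 * pages PageOffline() and drop it from the kernel linear mapping. Returns
 * the physical address of the chunk, or 0 on failure.
 */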
static u64 memtrace_alloc_node(u32 nid, u64 size)
{
	const unsigned long nr_pages = PHYS_PFN(size);
	unsigned long pfn, start_pfn;
	struct page *page;

	/*
	 * Trace memory needs to be aligned to the size, which is guaranteed
	 * by alloc_contig_pages().
	 */
	page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
				  __GFP_NOWARN, nid, NULL);
	if (!page)
		return 0;
	start_pfn = page_to_pfn(page);

	/*
	 * Clear the range while we still have a linear mapping.
	 *
	 * TODO: use __GFP_ZERO with alloc_contig_pages() once supported.
	 */
	memtrace_clear_range(start_pfn, nr_pages);

	/*
	 * Set pages PageOffline(), to indicate that nobody (e.g., hibernation,
	 * dumping, ...) should be touching these pages.
	 */
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		__SetPageOffline(pfn_to_page(pfn));

	arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);

	return PFN_PHYS(start_pfn);
}
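
/*
 * Carve one trace chunk of @size bytes out of each online node. A node
 * without enough (or any) local memory is skipped with an error message
 * rather than failing the whole operation.
 */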
static int memtrace_init_regions_runtime(u64 size)
{
	u32 nid;
	u64 m;

	memtrace_array = kcalloc(num_online_nodes(),
				 sizeof(struct memtrace_entry), GFP_KERNEL);
	if (!memtrace_array) {
		pr_err("Failed to allocate memtrace_array\n");
		return -EINVAL;
	}

	for_each_online_node(nid) {
		m = memtrace_alloc_node(nid, size);

		/*
		 * A node might not have any local memory, so warn but
		 * continue on.
		 */
		if (!m) {
			pr_err("Failed to allocate trace memory on node %d\n", nid);
			continue;
		}

		pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);

		memtrace_array[memtrace_array_nr].start = m;
		memtrace_array[memtrace_array_nr].size = size;
		memtrace_array[memtrace_array_nr].nid = nid;
		memtrace_array_nr++;
	}

	return 0;
}

static struct dentry *memtrace_debugfs_dir;
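
/*
 * Expose each allocated chunk under debugfs: one directory per node,
 * named after the node id, containing a mmap()able "trace" file plus
 * read-only "start" and "size" attributes.
 */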
static int memtrace_init_debugfs(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < memtrace_array_nr; i++) {
		struct dentry *dir;
		struct memtrace_entry *ent = &memtrace_array[i];

		ent->mem = ioremap(ent->start, ent->size);
		/* Warn but continue on */
		if (!ent->mem) {
			pr_err("Failed to map trace memory at 0x%llx\n",
			       ent->start);
			ret = -1;
			continue;
		}

		snprintf(ent->name, 16, "%08x", ent->nid);
		dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);

		ent->dir = dir;
		debugfs_create_file_unsafe("trace", 0600, dir, ent, &memtrace_fops);
		debugfs_create_x64("start", 0400, dir, &ent->start);
		debugfs_create_x64("size", 0400, dir, &ent->size);
	}

	return ret;
}
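
/*
 * Undo memtrace_alloc_node(): re-establish the linear mapping, clear
 * PageOffline() and hand the pages back to the page allocator.
 */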
static int memtrace_free(int nid, u64 start, u64 size)
{
	struct mhp_params params = { .pgprot = PAGE_KERNEL };
	const unsigned long nr_pages = PHYS_PFN(size);
	const unsigned long start_pfn = PHYS_PFN(start);
	unsigned long pfn;
	int ret;

	ret = arch_create_linear_mapping(nid, start, size, &params);
	if (ret)
		return ret;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
		__ClearPageOffline(pfn_to_page(pfn));

	free_contig_range(start_pfn, nr_pages);
	return 0;
}

/*
 * Iterate through the chunks of memory we allocated and attempt to expose
 * them back to the kernel.
 */
static int memtrace_free_regions(void)
{
	int i, ret = 0;
	struct memtrace_entry *ent;

	for (i = memtrace_array_nr - 1; i >= 0; i--) {
		ent = &memtrace_array[i];

		/* We have freed this chunk previously */
		if (ent->nid == NUMA_NO_NODE)
			continue;

		/* Remove from io mappings */
		if (ent->mem) {
			iounmap(ent->mem);
			ent->mem = NULL;
		}

		if (memtrace_free(ent->nid, ent->start, ent->size)) {
			pr_err("Failed to free trace memory on node %d\n",
			       ent->nid);
			ret += 1;
			continue;
		}

		/*
		 * Memory was freed successfully, so clean up references to it
		 * so that on reentry we can tell this chunk was freed.
		 */
		debugfs_remove_recursive(ent->dir);
		pr_info("Freed trace memory back on node %d\n", ent->nid);

		ent->size = ent->start = ent->nid = NUMA_NO_NODE;
	}

	if (ret)
		return ret;

	/* If all chunks of memory were freed successfully, reset globals */
	kfree(memtrace_array);
	memtrace_array = NULL;
	memtrace_size = 0;
	memtrace_array_nr = 0;
	return 0;
}
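
/*
 * Write handler for the "enable" file: a non-zero value frees any current
 * regions and allocates a chunk of that many bytes on each online node;
 * zero just frees everything.
 */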
static int memtrace_enable_set(void *data, u64 val)
{
	int rc = -EAGAIN;
	u64 bytes;

	/*
	 * Don't attempt to do anything if the size isn't aligned to a
	 * memory block. Zero (which disables memtrace) trivially is.
	 */
	bytes = memory_block_size_bytes();
	if (val & (bytes - 1)) {
		pr_err("Value must be aligned with 0x%llx\n", bytes);
		return -EINVAL;
	}

	mutex_lock(&memtrace_mutex);

	/* Free all previously allocated memory. */
	if (memtrace_size && memtrace_free_regions())
		goto out_unlock;

	if (!val) {
		rc = 0;
		goto out_unlock;
	}

	/* Allocate memory. */
	if (memtrace_init_regions_runtime(val))
		goto out_unlock;

	if (memtrace_init_debugfs())
		goto out_unlock;

	memtrace_size = val;
	rc = 0;
out_unlock:
	mutex_unlock(&memtrace_mutex);
	return rc;
}

static int memtrace_enable_get(void *data, u64 *val)
{
	*val = memtrace_size;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
			memtrace_enable_set, "0x%016llx\n");
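
/*
 * Typical usage from userspace (illustrative; the paths assume debugfs is
 * mounted at /sys/kernel/debug and arch_debugfs_dir is "powerpc"):
 *
 *   echo 0x40000000 > /sys/kernel/debug/powerpc/memtrace/enable
 *   # one directory per node then appears, e.g. 00000000/trace
 *   echo 0 > /sys/kernel/debug/powerpc/memtrace/enable   # free the memory
 */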
static int memtrace_init(void)
{
	memtrace_debugfs_dir = debugfs_create_dir("memtrace",
						  arch_debugfs_dir);

	debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
			    NULL, &memtrace_init_fops);
	return 0;
}
machine_device_initcall(powernv, memtrace_init);