early_ioremap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995 1996, 2014 Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>
#include <asm/early_ioremap.h>
#include "internal.h"

#ifdef CONFIG_MMU
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
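
/*
 * Note: early_ioremap_debug is switched on by passing
 * "early_ioremap_debug" on the kernel command line; it makes each early
 * (un)mapping emit a diagnostic (with backtrace) through the
 * WARN(early_ioremap_debug, ...) calls below.
 */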

static int after_paging_init __initdata;

pgprot_t __init __weak early_memremap_pgprot_adjust(resource_size_t phys_addr,
						    unsigned long size,
						    pgprot_t prot)
{
	return prot;
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing
 * (an illustrative sketch follows the fallbacks below).
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif
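
/*
 * Illustrative sketch (not part of this file): an architecture opting in
 * would provide real definitions in its asm/fixmap.h before this code is
 * built, typically by aliasing the hooks to its ordinary fixmap setter,
 * along these lines (arm64 does roughly this):
 *
 *	#define __late_set_fixmap	__set_fixmap
 *	#define __late_clear_fixmap(idx) \
 *		__set_fixmap((idx), 0, FIXMAP_PAGE_CLEAR)
 */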

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_setup(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (WARN_ON(prev_map[i]))
			break;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
}

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (WARN(count, KERN_WARNING
		 "Debug warning: early ioremap leak of %d areas detected.\n"
		 "Please boot with early_ioremap_debug and report the dmesg.\n",
		 count))
		return 1;
	return 0;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state >= SYSTEM_RUNNING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%pa, %08lx): no free slot found\n",
		 __func__, &phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = offset_in_page(phys_addr);
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	WARN(early_ioremap_debug, "%s(%pa, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, &phys_addr, size, slot, offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}
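
/*
 * Worked example (illustrative; the constants are per-architecture,
 * e.g. x86 uses NR_FIX_BTMAPS = 64 and FIX_BTMAPS_SLOTS = 8): since
 * fix_to_virt() maps higher fixmap indices to lower virtual addresses,
 * slot 0 starts at __fix_to_virt(FIX_BTMAP_BEGIN) and slot 1 starts 64
 * pages higher, at __fix_to_virt(FIX_BTMAP_BEGIN - 64). Decrementing
 * idx in the loop above therefore walks a slot's pages in ascending
 * virtual order, giving each slot one contiguous window (256 KB with
 * 4 KB pages in this example).
 */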

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%p, %08lx): slot not found\n",
		 __func__, addr, size))
		return;

	if (WARN(prev_size[slot] != size,
		 "%s(%p, %08lx) [%d] size not consistent %08lx\n",
		 __func__, addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "%s(%p, %08lx) [%d]\n",
	     __func__, addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = offset_in_page(virt_addr);
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}
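
/*
 * Typical (illustrative) use from early arch setup code, before
 * ioremap() is usable; "uart_phys" and the register offset are
 * placeholders, not real kernel symbols:
 *
 *	void __iomem *regs = early_ioremap(uart_phys, SZ_4K);
 *
 *	if (regs) {
 *		u32 id = readl(regs + 0x00);
 *		early_iounmap(regs, SZ_4K);
 *	}
 */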

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_NORMAL);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
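
/*
 * Typical (illustrative) use: inspecting a firmware-provided structure
 * by physical address before the linear map is up, much as x86 walks
 * its boot_params setup_data chain; "pa_data" is a placeholder:
 *
 *	struct setup_data *data = early_memremap(pa_data, sizeof(*data));
 *
 *	if (data) {
 *		u32 len = data->len;
 *		early_memunmap(data, sizeof(*data));
 *	}
 */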

#ifdef FIXMAP_PAGE_RO
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_RO);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif

#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
void __init *
early_memremap_prot(resource_size_t phys_addr, unsigned long size,
		    unsigned long prot_val)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       __pgprot(prot_val));
}
#endif

#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)

void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
	unsigned long slop, clen;
	char *p;

	while (size) {
		slop = offset_in_page(src);
		clen = size;
		if (clen > MAX_MAP_CHUNK - slop)
			clen = MAX_MAP_CHUNK - slop;
		p = early_memremap(src & PAGE_MASK, clen + slop);
		memcpy(dest, p + slop, clen);
		early_memunmap(p, clen + slop);
		dest += clen;
		src += clen;
		size -= clen;
	}
}
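
/*
 * Illustrative use: pulling a bootloader-provided blob (an initrd, say)
 * into an allocated buffer of arbitrary size; the helper above remaps
 * at most MAX_MAP_CHUNK bytes at a time, so the blob need not fit in a
 * single FIX_BTMAP slot. "blob_phys" and "blob_size" are placeholders:
 *
 *	void *buf = memblock_alloc(blob_size, SMP_CACHE_BYTES);
 *
 *	if (buf)
 *		copy_from_early_mem(buf, blob_phys, blob_size);
 */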

#else /* CONFIG_MMU */

void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}