/* pmsa-v7.c */
  1. /*
  2. * Based on linux/arch/arm/mm/nommu.c
  3. *
  4. * ARM PMSAv7 supporting functions.
  5. */
  6. #include <linux/bitops.h>
  7. #include <linux/memblock.h>
  8. #include <linux/string.h>
  9. #include <asm/cacheflush.h>
  10. #include <asm/cp15.h>
  11. #include <asm/cputype.h>
  12. #include <asm/mpu.h>
  13. #include <asm/sections.h>
  14. #include "mm.h"
/*
 * One candidate MPU region: a power-of-two sized, naturally aligned window,
 * optionally with some of its 8 subregions disabled to trim the edges.
 */
struct region {
	phys_addr_t base;	/* region base address */
	phys_addr_t size;	/* power-of-two region size */
	unsigned long subreg;	/* bitmap of subregions to disable */
};

/* Regions computed at boot to cover system RAM */
static struct region __initdata mem[MPU_MAX_REGIONS];
#ifdef CONFIG_XIP_KERNEL
/* Regions computed at boot to cover the XIP ROM image */
static struct region __initdata xip[MPU_MAX_REGIONS];
#endif

/* Hardware limits probed in pmsav7_adjust_lowmem_bounds() */
static unsigned int __initdata mpu_min_region_order;
static unsigned int __initdata mpu_max_regions;

static int __init __mpu_min_region_order(void);
static int __init __mpu_max_regions(void);
  28. #ifndef CONFIG_CPU_V7M
  29. #define DRBAR __ACCESS_CP15(c6, 0, c1, 0)
  30. #define IRBAR __ACCESS_CP15(c6, 0, c1, 1)
  31. #define DRSR __ACCESS_CP15(c6, 0, c1, 2)
  32. #define IRSR __ACCESS_CP15(c6, 0, c1, 3)
  33. #define DRACR __ACCESS_CP15(c6, 0, c1, 4)
  34. #define IRACR __ACCESS_CP15(c6, 0, c1, 5)
  35. #define RNGNR __ACCESS_CP15(c6, 0, c2, 0)
  36. /* Region number */
  37. static inline void rgnr_write(u32 v)
  38. {
  39. write_sysreg(v, RNGNR);
  40. }
  41. /* Data-side / unified region attributes */
  42. /* Region access control register */
  43. static inline void dracr_write(u32 v)
  44. {
  45. write_sysreg(v, DRACR);
  46. }
  47. /* Region size register */
  48. static inline void drsr_write(u32 v)
  49. {
  50. write_sysreg(v, DRSR);
  51. }
  52. /* Region base address register */
  53. static inline void drbar_write(u32 v)
  54. {
  55. write_sysreg(v, DRBAR);
  56. }
  57. static inline u32 drbar_read(void)
  58. {
  59. return read_sysreg(DRBAR);
  60. }
  61. /* Optional instruction-side region attributes */
  62. /* I-side Region access control register */
  63. static inline void iracr_write(u32 v)
  64. {
  65. write_sysreg(v, IRACR);
  66. }
  67. /* I-side Region size register */
  68. static inline void irsr_write(u32 v)
  69. {
  70. write_sysreg(v, IRSR);
  71. }
  72. /* I-side Region base address register */
  73. static inline void irbar_write(u32 v)
  74. {
  75. write_sysreg(v, IRBAR);
  76. }
  77. static inline u32 irbar_read(void)
  78. {
  79. return read_sysreg(IRBAR);
  80. }
  81. #else
  82. static inline void rgnr_write(u32 v)
  83. {
  84. writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RNR);
  85. }
  86. /* Data-side / unified region attributes */
  87. /* Region access control register */
  88. static inline void dracr_write(u32 v)
  89. {
  90. u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(15, 0);
  91. writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + PMSAv7_RASR);
  92. }
  93. /* Region size register */
  94. static inline void drsr_write(u32 v)
  95. {
  96. u32 racr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(31, 16);
  97. writel_relaxed(v | racr, BASEADDR_V7M_SCB + PMSAv7_RASR);
  98. }
  99. /* Region base address register */
  100. static inline void drbar_write(u32 v)
  101. {
  102. writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RBAR);
  103. }
  104. static inline u32 drbar_read(void)
  105. {
  106. return readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RBAR);
  107. }
  108. /* ARMv7-M only supports a unified MPU, so I-side operations are nop */
  109. static inline void iracr_write(u32 v) {}
  110. static inline void irsr_write(u32 v) {}
  111. static inline void irbar_write(u32 v) {}
  112. static inline unsigned long irbar_read(void) {return 0;}
  113. #endif
/*
 * Try to cover [base, base + size) with a single power-of-two aligned MPU
 * region, disabling leading/trailing subregions to mask the parts that fall
 * outside the requested range.  Returns true and fills *region on success;
 * returns false if the overhang is not expressible in whole subregions.
 */
static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
{
	unsigned long subreg, bslots, sslots;
	/* Natural alignment of the candidate region, and the span it must cover */
	phys_addr_t abase = base & ~(size - 1);
	phys_addr_t asize = base + size - abase;
	/* Round the span up to the next power of two */
	phys_addr_t p2size = 1 << __fls(asize);
	phys_addr_t bdiff, sdiff;

	if (p2size != asize)
		p2size *= 2;

	/* Overhang before the requested base and after the requested end */
	bdiff = base - abase;
	sdiff = p2size - asize;
	/* PMSAv7 splits each region into PMSAv7_NR_SUBREGS equal subregions */
	subreg = p2size / PMSAv7_NR_SUBREGS;

	/* Both overhangs must be whole multiples of the subregion size */
	if ((bdiff % subreg) || (sdiff % subreg))
		return false;

	bslots = bdiff / subreg;
	sslots = sdiff / subreg;

	if (bslots || sslots) {
		int i;

		/* Subregion disable is only architected above a minimum size */
		if (subreg < PMSAv7_MIN_SUBREG_SIZE)
			return false;

		if (bslots + sslots > PMSAv7_NR_SUBREGS)
			return false;

		/* Disable the leading subregions (below the requested base) */
		for (i = 0; i < bslots; i++)
			_set_bit(i, &region->subreg);

		/* Disable the trailing subregions (beyond the requested end) */
		for (i = 1; i <= sslots; i++)
			_set_bit(PMSAv7_NR_SUBREGS - i, &region->subreg);
	}

	region->base = abase;
	region->size = p2size;

	return true;
}
/*
 * Greedily cover [base, base + size) with up to "limit" MPU regions,
 * recorded into "regions".  Each iteration either places one region via
 * try_split_region() or shrinks the attempted size to the largest chunk
 * that is both alignment-compatible with "base" and no bigger than the
 * remaining span.  Returns the number of regions used.
 */
static int __init allocate_region(phys_addr_t base, phys_addr_t size,
				  unsigned int limit, struct region *regions)
{
	int count = 0;
	phys_addr_t diff = size;
	/* Bound the loop in case no progress can be made */
	int attempts = MPU_MAX_REGIONS;

	while (diff) {
		/* Try cover region as is (maybe with help of subregions) */
		if (try_split_region(base, size, &regions[count])) {
			count++;
			base += size;
			diff -= size;
			size = diff;
		} else {
			/*
			 * Maximum aligned region might overflow phys_addr_t
			 * if "base" is 0. Hence we keep everything below 4G
			 * until we take the smaller of the aligned region
			 * size ("asize") and rounded region size ("p2size"),
			 * one of which is guaranteed to be smaller than the
			 * maximum physical address.
			 */
			phys_addr_t asize = (base - 1) ^ base;
			phys_addr_t p2size = (1 << __fls(diff)) - 1;

			size = asize < p2size ? asize + 1 : p2size + 1;
		}

		/*
		 * NOTE(review): this check runs after the increment, so
		 * count can reach limit + 1 (regions[limit] was written).
		 * Callers must size "regions" and interpret the return
		 * value accordingly — confirm against call sites.
		 */
		if (count > limit)
			break;

		if (!attempts)
			break;

		attempts--;
	}

	return count;
}
  179. /* MPU initialisation functions */
/*
 * Trim memblock so that lowmem fits in the MPU regions we can program:
 * probe the hardware limits, reserve slots for the background (and, on
 * v7-A/R, vectors) regions, optionally cover the XIP ROM, then compute the
 * RAM regions and remove from memblock any memory they cannot cover.
 */
void __init pmsav7_adjust_lowmem_bounds(void)
{
	phys_addr_t specified_mem_size = 0, total_mem_size = 0;
	phys_addr_t mem_start;
	phys_addr_t mem_end;
	phys_addr_t reg_start, reg_end;
	unsigned int mem_max_regions;
	bool first = true;
	int num;
	u64 i;

	/* Free-up PMSAv7_PROBE_REGION */
	mpu_min_region_order = __mpu_min_region_order();

	/* How many regions are supported */
	mpu_max_regions = __mpu_max_regions();

	mem_max_regions = min((unsigned int)MPU_MAX_REGIONS, mpu_max_regions);

	/* We need to keep one slot for background region */
	mem_max_regions--;

#ifndef CONFIG_CPU_V7M
	/* ... and one for vectors */
	mem_max_regions--;
#endif

#ifdef CONFIG_XIP_KERNEL
	/* plus some regions to cover XIP ROM */
	num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR,
			      mem_max_regions, xip);

	mem_max_regions -= num;
#endif

	for_each_mem_range(i, &reg_start, &reg_end) {
		if (first) {
			phys_addr_t phys_offset = PHYS_OFFSET;

			/*
			 * Initially only use memory continuous from
			 * PHYS_OFFSET */
			if (reg_start != phys_offset)
				panic("First memory bank must be contiguous from PHYS_OFFSET");

			mem_start = reg_start;
			mem_end = reg_end;
			specified_mem_size = mem_end - mem_start;
			first = false;
		} else {
			/*
			 * memblock auto merges contiguous blocks, remove
			 * all blocks afterwards in one go (we can't remove
			 * blocks separately while iterating)
			 */
			pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
				  &mem_end, &reg_start);
			/* 0 - reg_start == everything from reg_start to 4G */
			memblock_remove(reg_start, 0 - reg_start);
			break;
		}
	}

	memset(mem, 0, sizeof(mem));
	num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);

	/* Sum up coverage, excluding any disabled subregions */
	for (i = 0; i < num; i++) {
		unsigned long subreg = mem[i].size / PMSAv7_NR_SUBREGS;

		total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg);

		pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n",
			 &mem[i].base, &mem[i].size, PMSAv7_NR_SUBREGS, &mem[i].subreg);
	}

	/* Give back to memblock only what the MPU regions actually cover */
	if (total_mem_size != specified_mem_size) {
		pr_warn("Truncating memory from %pa to %pa (MPU region constraints)",
			&specified_mem_size, &total_mem_size);
		memblock_remove(mem_start + total_mem_size,
				specified_mem_size - total_mem_size);
	}
}
  246. static int __init __mpu_max_regions(void)
  247. {
  248. /*
  249. * We don't support a different number of I/D side regions so if we
  250. * have separate instruction and data memory maps then return
  251. * whichever side has a smaller number of supported regions.
  252. */
  253. u32 dregions, iregions, mpuir;
  254. mpuir = read_cpuid_mputype();
  255. dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;
  256. /* Check for separate d-side and i-side memory maps */
  257. if (mpuir & MPUIR_nU)
  258. iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION;
  259. /* Use the smallest of the two maxima */
  260. return min(dregions, iregions);
  261. }
  262. static int __init mpu_iside_independent(void)
  263. {
  264. /* MPUIR.nU specifies whether there is *not* a unified memory map */
  265. return read_cpuid_mputype() & MPUIR_nU;
  266. }
/*
 * Probe the minimum supported region order (log2 of the smallest region
 * size) by writing an all-ones base address and reading back which low
 * bits the hardware accepted.  Uses PMSAv7_PROBE_REGION, which is kept
 * free for this purpose, and restores its base to 0 afterwards.
 */
static int __init __mpu_min_region_order(void)
{
	u32 drbar_result, irbar_result;

	/* We've kept a region free for this probing */
	rgnr_write(PMSAv7_PROBE_REGION);
	isb();

	/*
	 * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
	 * region order
	 */
	drbar_write(0xFFFFFFFC);
	drbar_result = irbar_result = drbar_read();
	drbar_write(0x0);

	/* If the MPU is non-unified, we use the larger of the two minima*/
	if (mpu_iside_independent()) {
		irbar_write(0xFFFFFFFC);
		irbar_result = irbar_read();
		irbar_write(0x0);
	}

	isb(); /* Ensure that MPU region operations have completed */

	/*
	 * Return whichever result is larger: the lowest set bit of the
	 * read-back base gives the alignment, hence the minimum order.
	 */
	return __ffs(max(drbar_result, irbar_result));
}
/*
 * Program one MPU region and record it in mpu_rgn_info.
 *
 * @number:     hardware region slot to program
 * @start:      region base address (must suit the region's alignment)
 * @size_order: log2 of the region size
 * @properties: access control bits written to D/I RACR
 * @subregions: subregion-disable bitmap for the RSR SD field
 * @need_flush: clean/invalidate caches before switching the mapping
 *
 * Returns 0 on success, -ENOENT for an invalid slot, -ENOMEM for an
 * unsupported size.  The write order (base, attributes, then size/enable,
 * separated by barriers) is deliberate: the region must only be enabled
 * once its base and attributes are in place.
 */
static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
				   unsigned int size_order, unsigned int properties,
				   unsigned int subregions, bool need_flush)
{
	u32 size_data;

	/* We kept a region free for probing resolution of MPU regions*/
	if (number > mpu_max_regions
	    || number >= MPU_MAX_REGIONS)
		return -ENOENT;

	if (size_order > 32)
		return -ENOMEM;

	if (size_order < mpu_min_region_order)
		return -ENOMEM;

	/* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */
	size_data = ((size_order - 1) << PMSAv7_RSR_SZ) | 1 << PMSAv7_RSR_EN;
	size_data |= subregions << PMSAv7_RSR_SD;

	if (need_flush)
		flush_cache_all();

	dsb(); /* Ensure all previous data accesses occur with old mappings */

	/* Select the target region slot */
	rgnr_write(number);
	isb();

	/* Base and attributes first, enable (via DRSR) last */
	drbar_write(start);
	dracr_write(properties);
	isb(); /* Propagate properties before enabling region */
	drsr_write(size_data);

	/* Check for independent I-side registers */
	if (mpu_iside_independent()) {
		irbar_write(start);
		iracr_write(properties);
		isb();
		irsr_write(size_data);
	}

	isb();

	/* Store region info (we treat i/d side the same, so only store d) */
	mpu_rgn_info.rgns[number].dracr = properties;
	mpu_rgn_info.rgns[number].drbar = start;
	mpu_rgn_info.rgns[number].drsr = size_data;

	mpu_rgn_info.used++;

	return 0;
}
/*
 * Set up default MPU regions, doing nothing if there is no MPU.
 *
 * Region order is significant: background first (slot 0), then XIP ROM
 * (if any), then RAM, then vectors — later regions take priority where
 * they overlap earlier ones.
 */
void __init pmsav7_setup(void)
{
	int i, region = 0, err = 0;

	/* Setup MPU (order is important) */

	/* Background: whole address space, strongly-ordered, no-execute */
	err |= mpu_setup_region(region++, 0, 32,
				PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0RW,
				0, false);

#ifdef CONFIG_XIP_KERNEL
	/* ROM */
	for (i = 0; i < ARRAY_SIZE(xip); i++) {
		/*
		 * In case we overwrite RAM region we set earlier in
		 * head-nommu.S (which is cachable) all subsequent
		 * data access till we setup RAM bellow would be done
		 * with BG region (which is uncachable), thus we need
		 * to clean and invalidate cache.
		 */
		bool need_flush = region == PMSAv7_RAM_REGION;

		if (!xip[i].size)
			continue;

		err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
					PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL,
					xip[i].subreg, need_flush);
	}
#endif

	/* RAM: normal memory, kernel and user read-write */
	for (i = 0; i < ARRAY_SIZE(mem); i++) {
		if (!mem[i].size)
			continue;

		err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
					PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL,
					mem[i].subreg, false);
	}

	/* Vectors */
#ifndef CONFIG_CPU_V7M
	err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
				PMSAv7_AP_PL1RW_PL0NA | PMSAv7_RGN_NORMAL,
				0, false);
#endif

	if (err) {
		panic("MPU region initialization failure! %d", err);
	} else {
		pr_info("Using ARMv7 PMSA Compliant MPU. "
			"Region independence: %s, Used %d of %d regions\n",
			mpu_iside_independent() ? "Yes" : "No",
			mpu_rgn_info.used, mpu_max_regions);
	}
}
  381. }