/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/head-nommu.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (C) 2003-2006 Hyok S. Choi
 *
 *  Common kernel startup code (non-paged MM)
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/errno.h>

#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/cp15.h>
#include <asm/thread_info.h>
#include <asm/v7m.h>
#include <asm/mpu.h>
#include <asm/page.h>

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
 * r1 = machine nr.
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 */
        __HEAD

#ifdef CONFIG_CPU_THUMBONLY
        .thumb
ENTRY(stext)
#else
        .arm
ENTRY(stext)

 THUMB( badr    r9, 1f          )       @ Kernel is always entered in ARM.
 THUMB( bx      r9              )       @ If this is a Thumb-2 kernel,
 THUMB( .thumb                  )       @ switch to Thumb now.
 THUMB(1:                       )
#endif
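
/*
 * With the virtualization extensions the boot loader may enter the
 * kernel in HYP mode. __hyp_stub_install installs the hypervisor stub
 * vectors (so HYP can be re-entered later) before we drop to SVC mode
 * below.
 */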
#ifdef CONFIG_ARM_VIRT_EXT
        bl      __hyp_stub_install
#endif
        @ ensure svc mode and all interrupts masked
        safe_svcmode_maskall r9
                                                @ and irqs disabled
#if defined(CONFIG_CPU_CP15)
        mrc     p15, 0, r9, c0, c0              @ get processor id
#elif defined(CONFIG_CPU_V7M)
        ldr     r9, =BASEADDR_V7M_SCB
        ldr     r9, [r9, V7M_SCB_CPUID]
#else
        ldr     r9, =CONFIG_PROCESSOR_ID
#endif
        bl      __lookup_processor_type         @ r5=procinfo r9=cpuid
        movs    r10, r5                         @ invalid processor (r5=0)?
        beq     __error_p                       @ yes, error 'p'

#ifdef CONFIG_ARM_MPU
        bl      __setup_mpu
#endif
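
/*
 * Call the per-CPU init function from the procinfo record found above.
 * PROCINFO_INITFUNC is stored as an offset relative to the record, so
 * r10 is added before calling. The function returns the value for the
 * CP15 control register (or the V7M CCR) in r0; see __after_proc_init.
 */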
        badr    lr, 1f                          @ return (PIC) address
        ldr     r12, [r10, #PROCINFO_INITFUNC]
        add     r12, r12, r10
        ret     r12
1:      ldr     lr, =__mmap_switched
        b       __after_proc_init
ENDPROC(stext)

#ifdef CONFIG_SMP
        .text
ENTRY(secondary_startup)
        /*
         * Common entry point for secondary CPUs.
         *
         * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
         * the processor type - there is no need to check the machine type
         * as it has already been validated by the primary processor.
         */
#ifdef CONFIG_ARM_VIRT_EXT
        bl      __hyp_stub_install_secondary
#endif
        safe_svcmode_maskall r9

#ifndef CONFIG_CPU_CP15
        ldr     r9, =CONFIG_PROCESSOR_ID
#else
        mrc     p15, 0, r9, c0, c0              @ get processor id
#endif
        bl      __lookup_processor_type         @ r5=procinfo r9=cpuid
        movs    r10, r5                         @ invalid processor?
        beq     __error_p                       @ yes, error 'p'

        ldr     r7, __secondary_data

#ifdef CONFIG_ARM_MPU
        bl      __secondary_setup_mpu           @ Initialize the MPU
#endif

        badr    lr, 1f                          @ return (PIC) address
        ldr     r12, [r10, #PROCINFO_INITFUNC]
        add     r12, r12, r10
        ret     r12
1:      bl      __after_proc_init
        ldr     r7, __secondary_data            @ reload r7, the calls above may clobber it
        ldr     sp, [r7, #12]                   @ set up the stack pointer
        ldr     r0, [r7, #16]                   @ set up task pointer
        mov     fp, #0
        b       secondary_start_kernel
ENDPROC(secondary_startup)

        .type   __secondary_data, %object
__secondary_data:
        .long   secondary_data
#endif /* CONFIG_SMP */

/*
 * Set the Control Register and Read the processor ID.
 */
        .text
__after_proc_init:
M_CLASS(movw    r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt    r12, #:upper16:BASEADDR_V7M_SCB)
#ifdef CONFIG_ARM_MPU
M_CLASS(ldr     r3, [r12, 0x50])                @ v7-M: ID_MMFR0 is at SCB + 0x50
AR_CLASS(mrc    p15, 0, r3, c0, c1, 4)          @ Read ID_MMFR0
        and     r3, r3, #(MMFR0_PMSA)           @ PMSA field
        teq     r3, #(MMFR0_PMSAv7)             @ PMSA v7
        beq     1f                              @ PMSAv7: skip the v8 MAIR setup
        teq     r3, #(MMFR0_PMSAv8)             @ PMSA v8
        /*
         * Memory region attributes for PMSAv8:
         *
         *   n = AttrIndx[2:0]
         *                      n       MAIR
         *   DEVICE_nGnRnE      000     00000000
         *   NORMAL             001     11111111
         */
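        /*
         * Attribute indices 0-3 live in MAIR0 and 4-7 in MAIR1; only
         * indices 0 and 1 are used here, so MAIR1 is simply cleared.
         */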
        ldreq   r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \
                     PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL)
AR_CLASS(mcreq  p15, 0, r3, c10, c2, 0)         @ MAIR 0
M_CLASS(streq   r3, [r12, #PMSAv8_MAIR0])
        moveq   r3, #0
AR_CLASS(mcreq  p15, 0, r3, c10, c2, 1)         @ MAIR 1
M_CLASS(streq   r3, [r12, #PMSAv8_MAIR1])
1:
#endif
#ifdef CONFIG_CPU_CP15
        /*
         * CP15 system control register value returned in r0 from
         * the CPU init function.
         */

#ifdef CONFIG_ARM_MPU
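        /*
         * The eq condition still holds from the PMSA probe above: Z is
         * set both for PMSAv7 (the beq to 1: preserved it) and PMSAv8,
         * so the MPU is only switched on when a PMSA MPU was found.
         */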
        biceq   r0, r0, #CR_BR                  @ Disable the 'default mem-map'
        orreq   r0, r0, #CR_M                   @ Set SCTLR.M (MPU on)
#endif

#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
        orr     r0, r0, #CR_A
#else
        bic     r0, r0, #CR_A
#endif

#ifdef CONFIG_CPU_DCACHE_DISABLE
        bic     r0, r0, #CR_C
#endif

#ifdef CONFIG_CPU_BPREDICT_DISABLE
        bic     r0, r0, #CR_Z
#endif

#ifdef CONFIG_CPU_ICACHE_DISABLE
        bic     r0, r0, #CR_I
#endif
        mcr     p15, 0, r0, c1, c0, 0           @ write control reg
        instr_sync
#elif defined (CONFIG_CPU_V7M)
#ifdef CONFIG_ARM_MPU
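        /* As above, eq still flags a PMSA MPU found by the probe. */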
        ldreq   r3, [r12, MPU_CTRL]
        biceq   r3, #MPU_CTRL_PRIVDEFENA
        orreq   r3, #MPU_CTRL_ENABLE
        streq   r3, [r12, MPU_CTRL]
        isb
#endif
        /* For V7M systems we want to modify the CCR similarly to the SCTLR */
#ifdef CONFIG_CPU_DCACHE_DISABLE
        bic     r0, r0, #V7M_SCB_CCR_DC
#endif

#ifdef CONFIG_CPU_BPREDICT_DISABLE
        bic     r0, r0, #V7M_SCB_CCR_BP
#endif

#ifdef CONFIG_CPU_ICACHE_DISABLE
        bic     r0, r0, #V7M_SCB_CCR_IC
#endif
        str     r0, [r12, V7M_SCB_CCR]
        /* Pass exc_ret to __mmap_switched */
        mov     r0, r10
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
        ret     lr
ENDPROC(__after_proc_init)
        .ltorg

#ifdef CONFIG_ARM_MPU

#ifndef CONFIG_CPU_V7M
/* Set which MPU region should be programmed */
.macro set_region_nr tmp, rgnr, unused
        mov     \tmp, \rgnr                     @ Use static region numbers
        mcr     p15, 0, \tmp, c6, c2, 0         @ Write RGNR
.endm

/* Setup a single MPU region, either D or I side (D-side for unified) */
.macro setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused
        mcr     p15, 0, \bar, c6, c1, (0 + \side)       @ I/DRBAR
        mcr     p15, 0, \acr, c6, c1, (4 + \side)       @ I/DRACR
        mcr     p15, 0, \sr, c6, c1, (2 + \side)        @ I/DRSR
.endm
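/* I/DRSR carries the region enable bit, so it is written last. */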
#else
.macro set_region_nr tmp, rgnr, base
        mov     \tmp, \rgnr
        str     \tmp, [\base, #PMSAv7_RNR]
.endm

.macro setup_region bar, acr, sr, unused, base
        lsl     \acr, \acr, #16
        orr     \acr, \acr, \sr
        str     \bar, [\base, #PMSAv7_RBAR]
        str     \acr, [\base, #PMSAv7_RASR]
.endm
#endif

/*
 * Setup the MPU and initial MPU Regions. We create the following regions:
 * Region 0: Use this for probing the MPU details, so leave disabled.
 * Region 1: Background region - covers the whole of RAM as strongly ordered
 * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
 * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
 *
 * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
 */
        __HEAD

ENTRY(__setup_mpu)
        /* Probe for v7 PMSA compliance */
M_CLASS(movw    r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt    r12, #:upper16:BASEADDR_V7M_SCB)

AR_CLASS(mrc    p15, 0, r0, c0, c1, 4)          @ Read ID_MMFR0
M_CLASS(ldr     r0, [r12, 0x50])                @ v7-M: ID_MMFR0 is at SCB + 0x50
        and     r0, r0, #(MMFR0_PMSA)           @ PMSA field
        teq     r0, #(MMFR0_PMSAv7)             @ PMSA v7
        beq     __setup_pmsa_v7
        teq     r0, #(MMFR0_PMSAv8)             @ PMSA v8
        beq     __setup_pmsa_v8

        ret     lr
ENDPROC(__setup_mpu)

ENTRY(__setup_pmsa_v7)
        /* Calculate the size of a region covering just the kernel */
        ldr     r5, =PLAT_PHYS_OFFSET           @ Region start: PHYS_OFFSET
        ldr     r6, =(_end)                     @ Cover whole kernel
        sub     r6, r6, r5                      @ Minimum size of region to map
        clz     r6, r6                          @ Region size must be 2^N...
        rsb     r6, r6, #31                     @ ...so round up region size
        lsl     r6, r6, #PMSAv7_RSR_SZ          @ Put size in right field
        orr     r6, r6, #(1 << PMSAv7_RSR_EN)   @ Set region enabled bit
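        /*
         * Worked example: a 5 MiB image gives 31 - clz = 22, and a
         * PMSAv7 size field of 22 encodes a 2^(22+1) = 8 MiB region,
         * the smallest power-of-two region covering the image.
         */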

        /* Determine whether the D/I-side memory map is unified. We set the
         * flags here and continue to use them for the rest of this function */
AR_CLASS(mrc    p15, 0, r0, c0, c0, 4)          @ MPUIR
M_CLASS(ldr     r0, [r12, #MPU_TYPE])
        ands    r5, r0, #MPUIR_DREGION_SZMASK   @ 0 size d region => No MPU
        bxeq    lr
        tst     r0, #MPUIR_nU                   @ MPUIR_nU = 0 for unified

        /* Setup second region first to free up r6 */
        set_region_nr r0, #PMSAv7_RAM_REGION, r12
        isb
        /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
        ldr     r0, =PLAT_PHYS_OFFSET           @ RAM starts at PHYS_OFFSET
        ldr     r5, =(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL)

        setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12  @ PHYS_OFFSET, shared, enabled
        beq     1f                              @ Unified map: D-side covers I-side too
        setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled
1:      isb

        /* First/background region */
        set_region_nr r0, #PMSAv7_BG_REGION, r12
        isb
        /* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
        mov     r0, #0                          @ BG region starts at 0x0
        ldr     r5, =(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA)
        mov     r6, #PMSAv7_RSR_ALL_MEM         @ 4GB region, enabled

        setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12  @ 0x0, BG region, enabled
        beq     2f                              @ Unified map: D-side covers I-side too
        setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ 0x0, BG region, enabled
2:      isb

#ifdef CONFIG_XIP_KERNEL
        set_region_nr r0, #PMSAv7_ROM_REGION, r12
        isb

        ldr     r5, =(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL)

        ldr     r0, =CONFIG_XIP_PHYS_ADDR       @ ROM start
        ldr     r6, =(_exiprom)                 @ ROM end
        sub     r6, r6, r0                      @ Minimum size of region to map
        clz     r6, r6                          @ Region size must be 2^N...
        rsb     r6, r6, #31                     @ ...so round up region size
        lsl     r6, r6, #PMSAv7_RSR_SZ          @ Put size in right field
        orr     r6, r6, #(1 << PMSAv7_RSR_EN)   @ Set region enabled bit

        setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12  @ XIP_PHYS_ADDR, shared, enabled
        beq     3f                              @ Unified map: D-side covers I-side too
        setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
3:      isb
#endif
        ret     lr
ENDPROC(__setup_pmsa_v7)

ENTRY(__setup_pmsa_v8)
        mov     r0, #0
AR_CLASS(mcr    p15, 0, r0, c6, c2, 1)          @ PRSEL
M_CLASS(str     r0, [r12, #PMSAv8_RNR])
        isb
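
        /*
         * PMSAv8 regions are base/limit pairs (PRBAR/PRLAR) rather than
         * power-of-two sized. Fixed layout below: region 0 = XIP ROM
         * (if any), region 1 = kernel RAM, regions 2/3 (and 4 for XIP) =
         * background device mappings around the kernel.
         */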

#ifdef CONFIG_XIP_KERNEL
        ldr     r5, =CONFIG_XIP_PHYS_ADDR       @ ROM start
        ldr     r6, =(_exiprom)                 @ ROM end
        sub     r6, r6, #1
        bic     r6, r6, #(PMSAv8_MINALIGN - 1)

        orr     r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
        orr     r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)

AR_CLASS(mcr    p15, 0, r5, c6, c8, 0)          @ PRBAR0
AR_CLASS(mcr    p15, 0, r6, c6, c8, 1)          @ PRLAR0
M_CLASS(str     r5, [r12, #PMSAv8_RBAR_A(0)])
M_CLASS(str     r6, [r12, #PMSAv8_RLAR_A(0)])
#endif
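
        /*
         * Attribute bits are packed into the low bits of each pair:
         * PRBAR = base | AP | shareability, PRLAR = limit | MAIR
         * attribute index | enable. The same pattern repeats for every
         * region below.
         */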
        ldr     r5, =KERNEL_START
        ldr     r6, =KERNEL_END
        sub     r6, r6, #1
        bic     r6, r6, #(PMSAv8_MINALIGN - 1)

        orr     r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
        orr     r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)

AR_CLASS(mcr    p15, 0, r5, c6, c8, 4)          @ PRBAR1
AR_CLASS(mcr    p15, 0, r6, c6, c8, 5)          @ PRLAR1
M_CLASS(str     r5, [r12, #PMSAv8_RBAR_A(1)])
M_CLASS(str     r6, [r12, #PMSAv8_RLAR_A(1)])

        /* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */
#ifdef CONFIG_XIP_KERNEL
        ldr     r6, =KERNEL_START
        ldr     r5, =CONFIG_XIP_PHYS_ADDR
        cmp     r6, r5
        movcs   r6, r5
#else
        ldr     r6, =KERNEL_START
#endif
        cmp     r6, #0
        beq     1f                              @ Nothing below the kernel: skip
        mov     r5, #0
        sub     r6, r6, #1
        bic     r6, r6, #(PMSAv8_MINALIGN - 1)

        orr     r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
        orr     r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)

AR_CLASS(mcr    p15, 0, r5, c6, c9, 0)          @ PRBAR2
AR_CLASS(mcr    p15, 0, r6, c6, c9, 1)          @ PRLAR2
M_CLASS(str     r5, [r12, #PMSAv8_RBAR_A(2)])
M_CLASS(str     r6, [r12, #PMSAv8_RLAR_A(2)])
1:
        /* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */
#ifdef CONFIG_XIP_KERNEL
        ldr     r5, =KERNEL_END
        ldr     r6, =(_exiprom)
        cmp     r5, r6
        movcc   r5, r6
#else
        ldr     r5, =KERNEL_END
#endif
        mov     r6, #0xffffffff
        bic     r6, r6, #(PMSAv8_MINALIGN - 1)

        orr     r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
        orr     r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)

AR_CLASS(mcr    p15, 0, r5, c6, c9, 4)          @ PRBAR3
AR_CLASS(mcr    p15, 0, r6, c6, c9, 5)          @ PRLAR3
M_CLASS(str     r5, [r12, #PMSAv8_RBAR_A(3)])
M_CLASS(str     r6, [r12, #PMSAv8_RLAR_A(3)])

#ifdef CONFIG_XIP_KERNEL
        /* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */
        ldr     r5, =(_exiprom)
        ldr     r6, =KERNEL_END
        cmp     r5, r6
        movcs   r5, r6

        ldr     r6, =KERNEL_START
        ldr     r0, =CONFIG_XIP_PHYS_ADDR
        cmp     r6, r0
        movcc   r6, r0

        sub     r6, r6, #1
        bic     r6, r6, #(PMSAv8_MINALIGN - 1)

        orr     r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
        orr     r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)

#ifdef CONFIG_CPU_V7M
        /* There is no alias for n == 4, so select region 4 via RNR instead */
        mov     r0, #4
        str     r0, [r12, #PMSAv8_RNR]          @ PRSEL
        isb

        str     r5, [r12, #PMSAv8_RBAR_A(0)]
        str     r6, [r12, #PMSAv8_RLAR_A(0)]
#else
        mcr     p15, 0, r5, c6, c10, 0          @ PRBAR4
        mcr     p15, 0, r6, c6, c10, 1          @ PRLAR4
#endif
#endif
        ret     lr
ENDPROC(__setup_pmsa_v8)

#ifdef CONFIG_SMP
/*
 * r6: pointer at mpu_rgn_info
 */
        .text
ENTRY(__secondary_setup_mpu)
        /* Use MPU region info supplied by __cpu_up */
        ldr     r6, [r7]                        @ get secondary_data.mpu_rgn_info

        /* Probe for v7 PMSA compliance */
        mrc     p15, 0, r0, c0, c1, 4           @ Read ID_MMFR0
        and     r0, r0, #(MMFR0_PMSA)           @ PMSA field
        teq     r0, #(MMFR0_PMSAv7)             @ PMSA v7
        beq     __secondary_setup_pmsa_v7
        teq     r0, #(MMFR0_PMSAv8)             @ PMSA v8
        beq     __secondary_setup_pmsa_v8
        b       __error_p
ENDPROC(__secondary_setup_mpu)

/*
 * r6: pointer at mpu_rgn_info
 */
ENTRY(__secondary_setup_pmsa_v7)
        /* Determine whether the D/I-side memory map is unified. We set the
         * flags here and continue to use them for the rest of this function */
        mrc     p15, 0, r0, c0, c0, 4           @ MPUIR
        ands    r5, r0, #MPUIR_DREGION_SZMASK   @ 0 size d region => No MPU
        beq     __error_p

        ldr     r4, [r6, #MPU_RNG_INFO_USED]
        mov     r5, #MPU_RNG_SIZE
        add     r3, r6, #MPU_RNG_INFO_RNGS
        mla     r3, r4, r5, r3
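        /* r3 now points one past the last used entry; walk the table backwards */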
1:
        tst     r0, #MPUIR_nU                   @ MPUIR_nU = 0 for unified
        sub     r3, r3, #MPU_RNG_SIZE
        sub     r4, r4, #1

        set_region_nr r0, r4
        isb

        ldr     r0, [r3, #MPU_RGN_DRBAR]
        ldr     r6, [r3, #MPU_RGN_DRSR]
        ldr     r5, [r3, #MPU_RGN_DRACR]

        setup_region r0, r5, r6, PMSAv7_DATA_SIDE
        beq     2f                              @ Unified map: D-side covers I-side too
        setup_region r0, r5, r6, PMSAv7_INSTR_SIDE
2:      isb

        mrc     p15, 0, r0, c0, c0, 4           @ Reevaluate the MPUIR
        cmp     r4, #0
        bgt     1b

        ret     lr
ENDPROC(__secondary_setup_pmsa_v7)

ENTRY(__secondary_setup_pmsa_v8)
        ldr     r4, [r6, #MPU_RNG_INFO_USED]
#ifndef CONFIG_XIP_KERNEL
        add     r4, r4, #1
#endif
        mov     r5, #MPU_RNG_SIZE
        add     r3, r6, #MPU_RNG_INFO_RNGS
        mla     r3, r4, r5, r3
1:
        sub     r3, r3, #MPU_RNG_SIZE
        sub     r4, r4, #1

        mcr     p15, 0, r4, c6, c2, 1           @ PRSEL
        isb

        ldr     r5, [r3, #MPU_RGN_PRBAR]
        ldr     r6, [r3, #MPU_RGN_PRLAR]

        mcr     p15, 0, r5, c6, c3, 0           @ PRBAR
        mcr     p15, 0, r6, c6, c3, 1           @ PRLAR

        cmp     r4, #0
        bgt     1b

        ret     lr
ENDPROC(__secondary_setup_pmsa_v8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_ARM_MPU */

#include "head-common.S"