proc-macros.S

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * We need constants.h for:
 *  VMA_VM_MM
 *  VMA_VM_FLAGS
 *  VM_EXEC
 */
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#ifdef CONFIG_CPU_V7M
#include <asm/v7m.h>
#endif

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
        .macro  vma_vm_mm, rd, rn
        ldr     \rd, [\rn, #VMA_VM_MM]
        .endm
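
/*
 * Usage sketch (illustrative, not part of the original file): assuming
 * r5 holds a struct vm_area_struct pointer:
 *
 *      vma_vm_mm r0, r5                @ r0 = vma->vm_mm
 */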

/*
 * vma_vm_flags - get vma->vm_flags
 */
        .macro  vma_vm_flags, rd, rn
        ldr     \rd, [\rn, #VMA_VM_FLAGS]
        .endm

/*
 * act_mm - get current->active_mm
 */
        .macro  act_mm, rd
        get_current \rd
        .if (TSK_ACTIVE_MM > IMM12_MASK)
        @ offset exceeds the 12-bit ldr immediate: add the high bits
        @ first, then index with the low 12 bits
        add     \rd, \rd, #TSK_ACTIVE_MM & ~IMM12_MASK
        .endif
        ldr     \rd, [\rd, #TSK_ACTIVE_MM & IMM12_MASK]
        .endm
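
/*
 * Usage sketch (illustrative): \rd is used as both base and result, so
 * a single scratch register suffices:
 *
 *      act_mm  r4                      @ r4 = current->active_mm
 */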

/*
 * mmid - get context id from mm pointer (mm->context.id)
 * note, this field is 64bit, so in big-endian the two words are swapped too.
 */
        .macro  mmid, rd, rn
#ifdef __ARMEB__
        ldr     \rd, [\rn, #MM_CONTEXT_ID + 4]
#else
        ldr     \rd, [\rn, #MM_CONTEXT_ID]
#endif
        .endm

/*
 * asid - mask the ASID from the context ID
 */
        .macro  asid, rd, rn
        and     \rd, \rn, #255
        .endm
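
/*
 * Usage sketch (illustrative): extract the 8-bit ASID, assuming r1
 * holds a struct mm_struct pointer:
 *
 *      mmid    r2, r1                  @ r2 = low word of mm->context.id
 *      asid    r2, r2                  @ r2 = ASID, i.e. bits 7:0
 */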

/*
 * crval - emit the cp15 c1 control register clear/set value pair;
 * no-MMU builds use \ucset in place of \mmuset.
 */
        .macro  crval, clear, mmuset, ucset
#ifdef CONFIG_MMU
        .word   \clear
        .word   \mmuset
#else
        .word   \clear
        .word   \ucset
#endif
        .endm
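
/*
 * Usage sketch (illustrative, hypothetical label and mask values):
 * emit the pair under a label that the setup code can find:
 *
 * myproc_crval:
 *      crval   clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001134
 */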

/*
 * dcache_line_size - get the minimum D-cache line size from the CTR
 * register on ARMv7.
 */
        .macro  dcache_line_size, reg, tmp
#ifdef CONFIG_CPU_V7M
        movw    \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
        movt    \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
        ldr     \tmp, [\tmp]
#else
        mrc     p15, 0, \tmp, c0, c0, 1         @ read ctr
#endif
        lsr     \tmp, \tmp, #16                 @ DminLine, CTR[19:16]
        and     \tmp, \tmp, #0xf                @ cache line size encoding
        mov     \reg, #4                        @ bytes per word
        mov     \reg, \reg, lsl \tmp            @ actual cache line size
        .endm

/*
 * icache_line_size - get the minimum I-cache line size from the CTR
 * register on ARMv7.
 */
        .macro  icache_line_size, reg, tmp
#ifdef CONFIG_CPU_V7M
        movw    \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
        movt    \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
        ldr     \tmp, [\tmp]
#else
        mrc     p15, 0, \tmp, c0, c0, 1         @ read ctr
#endif
        and     \tmp, \tmp, #0xf                @ cache line size encoding (IminLine, CTR[3:0])
        mov     \reg, #4                        @ bytes per word
        mov     \reg, \reg, lsl \tmp            @ actual cache line size
        .endm
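
/*
 * Usage sketch (illustrative): the usual line-alignment idiom before
 * operating on a D-cache line, assuming the address is in r0:
 *
 *      dcache_line_size r2, r3         @ r2 = line size in bytes (r3 clobbered)
 *      sub     r3, r2, #1
 *      bic     r0, r0, r3              @ r0 rounded down to a line boundary
 */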

/*
 * Sanity check the PTE configuration for the code below - which makes
 * certain assumptions about how these bits are laid out.
 */
#ifdef CONFIG_MMU
#if L_PTE_SHARED != PTE_EXT_SHARED
#error PTE shared bit mismatch
#endif
#if !defined (CONFIG_ARM_LPAE) && \
        (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
         L_PTE_PRESENT) > L_PTE_SHARED
#error Invalid Linux PTE bit settings
#endif
#endif  /* CONFIG_MMU */

/*
 * The ARMv6 and ARMv7 set_pte_ext translation function.
 *
 * Permission translation:
 *  YUWD  APX AP1 AP0   SVC     User
 *  0xxx   0   0   0    no acc  no acc
 *  100x   1   0   1    r/o     no acc
 *  10x0   1   0   1    r/o     no acc
 *  1011   0   0   1    r/w     no acc
 *  110x   1   1   1    r/o     r/o
 *  11x0   1   1   1    r/o     r/o
 *  1111   0   1   1    r/w     r/w
 */
        .macro  armv6_mt_table pfx
\pfx\()_mt_table:
        .long   0x00                                            @ L_PTE_MT_UNCACHED
        .long   PTE_EXT_TEX(1)                                  @ L_PTE_MT_BUFFERABLE
        .long   PTE_CACHEABLE                                   @ L_PTE_MT_WRITETHROUGH
        .long   PTE_CACHEABLE | PTE_BUFFERABLE                  @ L_PTE_MT_WRITEBACK
        .long   PTE_BUFFERABLE                                  @ L_PTE_MT_DEV_SHARED
        .long   0x00                                            @ unused
        .long   0x00                                            @ L_PTE_MT_MINICACHE (not present)
        .long   PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
        .long   0x00                                            @ unused
        .long   PTE_EXT_TEX(1)                                  @ L_PTE_MT_DEV_WC
        .long   0x00                                            @ unused
        .long   PTE_CACHEABLE | PTE_BUFFERABLE                  @ L_PTE_MT_DEV_CACHED
        .long   PTE_EXT_TEX(2)                                  @ L_PTE_MT_DEV_NONSHARED
        .long   0x00                                            @ unused
        .long   0x00                                            @ unused
        .long   PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX    @ L_PTE_MT_VECTORS
        .endm

        .macro  armv6_set_pte_ext pfx
        str     r1, [r0], #2048                 @ linux version

        bic     r3, r1, #0x000003fc
        bic     r3, r3, #PTE_TYPE_MASK
        orr     r3, r3, r2
        orr     r3, r3, #PTE_EXT_AP0 | 2

        adr     ip, \pfx\()_mt_table
        and     r2, r1, #L_PTE_MT_MASK
        ldr     r2, [ip, r2]

        eor     r1, r1, #L_PTE_DIRTY
        tst     r1, #L_PTE_DIRTY|L_PTE_RDONLY
        orrne   r3, r3, #PTE_EXT_APX

        tst     r1, #L_PTE_USER
        orrne   r3, r3, #PTE_EXT_AP1
        tstne   r3, #PTE_EXT_APX

        @ user read-only -> kernel read-only
        bicne   r3, r3, #PTE_EXT_AP0

        tst     r1, #L_PTE_XN
        orrne   r3, r3, #PTE_EXT_XN

        eor     r3, r3, r2

        tst     r1, #L_PTE_YOUNG
        tstne   r1, #L_PTE_PRESENT
        moveq   r3, #0
        tstne   r1, #L_PTE_NONE
        movne   r3, #0

        str     r3, [r0]
        mcr     p15, 0, r0, c7, c10, 1          @ flush_pte
        .endm
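
/*
 * Usage sketch (illustrative, hypothetical name): cpu_*_set_pte_ext
 * implementations are entered with r0 = Linux PTE pointer, r1 = PTE
 * value and r2 = extension bits, and simply wrap the macro:
 *
 * ENTRY(cpu_myproc_set_pte_ext)
 *      armv6_set_pte_ext cpu_myproc   @ needs an armv6_mt_table cpu_myproc
 *      ret     lr
 */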

/*
 * The ARMv3, ARMv4 and ARMv5 set_pte_ext translation function,
 * covering most CPUs except Xscale and Xscale 3.
 *
 * Permission translation:
 *  YUWD   AP   SVC     User
 *  0xxx  0x00  no acc  no acc
 *  100x  0x00  r/o     no acc
 *  10x0  0x00  r/o     no acc
 *  1011  0x55  r/w     no acc
 *  110x  0xaa  r/w     r/o
 *  11x0  0xaa  r/w     r/o
 *  1111  0xff  r/w     r/w
 */
        .macro  armv3_set_pte_ext wc_disable=1
        str     r1, [r0], #2048                 @ linux version

        eor     r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY

        bic     r2, r1, #PTE_SMALL_AP_MASK      @ keep C, B bits
        bic     r2, r2, #PTE_TYPE_MASK
        orr     r2, r2, #PTE_TYPE_SMALL

        tst     r3, #L_PTE_USER                 @ user?
        orrne   r2, r2, #PTE_SMALL_AP_URO_SRW

        tst     r3, #L_PTE_RDONLY | L_PTE_DIRTY @ write and dirty?
        orreq   r2, r2, #PTE_SMALL_AP_UNO_SRW

        tst     r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young?
        movne   r2, #0

        .if     \wc_disable
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
        tst     r2, #PTE_CACHEABLE
        bicne   r2, r2, #PTE_BUFFERABLE
#endif
        .endif

        str     r2, [r0]                        @ hardware version
        .endm
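
/*
 * Usage sketch (illustrative): same register contract as above
 * (r0 = Linux PTE pointer, r1 = PTE value); a caller that does not
 * need the write-through work-around can opt out:
 *
 *      armv3_set_pte_ext wc_disable=0
 */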

/*
 * Xscale set_pte_ext translation, split into two halves to cope
 * with work-arounds. r3 must be preserved by code between these
 * two macros.
 *
 * Permission translation:
 *  YUWD  AP    SVC     User
 *  0xxx  00    no acc  no acc
 *  100x  00    r/o     no acc
 *  10x0  00    r/o     no acc
 *  1011  01    r/w     no acc
 *  110x  10    r/w     r/o
 *  11x0  10    r/w     r/o
 *  1111  11    r/w     r/w
 */
        .macro  xscale_set_pte_ext_prologue
        str     r1, [r0]                        @ linux version

        eor     r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY

        bic     r2, r1, #PTE_SMALL_AP_MASK      @ keep C, B bits
        orr     r2, r2, #PTE_TYPE_EXT           @ extended page

        tst     r3, #L_PTE_USER                 @ user?
        orrne   r2, r2, #PTE_EXT_AP_URO_SRW     @ yes -> user r/o, system r/w

        tst     r3, #L_PTE_RDONLY | L_PTE_DIRTY @ write and dirty?
        orreq   r2, r2, #PTE_EXT_AP_UNO_SRW     @ yes -> user n/a, system r/w
                                                @ combined with user -> user r/w
        .endm

        .macro  xscale_set_pte_ext_epilogue
        tst     r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young?
        movne   r2, #0                          @ no -> fault

        str     r2, [r0, #2048]!                @ hardware version
        mov     ip, #0
        mcr     p15, 0, r0, c7, c10, 1          @ clean L1 D line
        mcr     p15, 0, ip, c7, c10, 4          @ data write barrier
        .endm
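
/*
 * Usage sketch (illustrative): CPU-specific errata fix-ups sit between
 * the two halves; they may adjust the hardware PTE in r2 but must
 * preserve r3:
 *
 *      xscale_set_pte_ext_prologue
 *      @ ... optional PTE fix-up here, keeping r3 intact ...
 *      xscale_set_pte_ext_epilogue
 */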

        .macro  define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
/*
 * If we are building for big.Little with branch predictor hardening,
 * we need the processor function tables to remain available after boot.
 */
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
        .section ".rodata"
#endif
        .type   \name\()_processor_functions, #object
        .align  2
ENTRY(\name\()_processor_functions)
        .word   \dabort
        .word   \pabort
        .word   cpu_\name\()_proc_init
        .word   \bugs
        .word   cpu_\name\()_proc_fin
        .word   cpu_\name\()_reset
        .word   cpu_\name\()_do_idle
        .word   cpu_\name\()_dcache_clean_area
        .word   cpu_\name\()_switch_mm

        .if \nommu
        .word   0
        .else
        .word   cpu_\name\()_set_pte_ext
        .endif

        .if \suspend
        .word   cpu_\name\()_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
        .word   cpu_\name\()_do_suspend
        .word   cpu_\name\()_do_resume
#else
        .word   0
        .word   0
#endif
        .else
        .word   0
        .word   0
        .word   0
        .endif

        .size   \name\()_processor_functions, . - \name\()_processor_functions

#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
        .previous
#endif
        .endm
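
/*
 * Usage sketch (illustrative, hypothetical names): a proc-*.S file
 * instantiates its table in one line:
 *
 *      define_processor_functions myproc, dabort=myproc_early_abort, \
 *              pabort=myproc_pabort, suspend=1
 *
 * which emits myproc_processor_functions and references
 * cpu_myproc_proc_init, cpu_myproc_switch_mm, etc., all of which the
 * file must provide.
 */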

        .macro  define_cache_functions name:req
        .align  2
        .type   \name\()_cache_fns, #object
ENTRY(\name\()_cache_fns)
        .long   \name\()_flush_icache_all
        .long   \name\()_flush_kern_cache_all
        .long   \name\()_flush_kern_cache_louis
        .long   \name\()_flush_user_cache_all
        .long   \name\()_flush_user_cache_range
        .long   \name\()_coherent_kern_range
        .long   \name\()_coherent_user_range
        .long   \name\()_flush_kern_dcache_area
        .long   \name\()_dma_map_area
        .long   \name\()_dma_unmap_area
        .long   \name\()_dma_flush_range
        .size   \name\()_cache_fns, . - \name\()_cache_fns
        .endm

        .macro  define_tlb_functions name:req, flags_up:req, flags_smp
        .type   \name\()_tlb_fns, #object
        .align  2
ENTRY(\name\()_tlb_fns)
        .long   \name\()_flush_user_tlb_range
        .long   \name\()_flush_kern_tlb_range
        .ifnb   \flags_smp
        ALT_SMP(.long   \flags_smp)
        ALT_UP(.long    \flags_up)
        .else
        .long   \flags_up
        .endif
        .size   \name\()_tlb_fns, . - \name\()_tlb_fns
        .endm
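
/*
 * Usage sketch (illustrative, hypothetical names): with both flag
 * arguments given, ALT_SMP/ALT_UP let boot-time alternatives patching
 * select the right TLB flags word:
 *
 *      define_tlb_functions myproc, myproc_tlb_flags_up, myproc_tlb_flags_smp
 */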

        .macro  globl_equ x, y
        .globl  \x
        .equ    \x, \y
        .endm

        .macro  initfn, func, base
        .long   \func - \base
        .endm

/*
 * Macro to calculate the log2 size for the protection region
 * registers. This calculates rd = log2(size) - 1. tmp must
 * not be the same register as rd.
 */
        .macro  pr_sz, rd, size, tmp
        mov     \tmp, \size, lsr #12            @ divide by 4096
        mov     \rd, #11                        @ log2(4096) - 1
1:      movs    \tmp, \tmp, lsr #1              @ count remaining doublings
        addne   \rd, \rd, #1
        bne     1b
        .endm
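
/*
 * Worked example (illustrative): for \size = 4096, \tmp starts at 1 and
 * the loop exits on the first shift, leaving \rd = 11 = log2(4096) - 1,
 * matching the PMSAv7 region size encoding where a field value n
 * selects a region of 2^(n+1) bytes.
 */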

/*
 * Macro to generate a protection region register value
 * given a pre-masked address, size, and enable bit.
 * Corrupts size.
 */
        .macro  pr_val, dest, addr, size, enable
        pr_sz   \dest, \size, \size             @ calculate log2(size) - 1
        orr     \dest, \addr, \dest, lsl #1     @ mask in the region size
        orr     \dest, \dest, \enable
        .endm
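
/*
 * Usage sketch (illustrative): build the value for a 4KB region whose
 * pre-masked base address is in r1, with bit 0 as the enable bit:
 *
 *      mov     r2, #4096
 *      pr_val  r0, r1, r2, #1          @ r0 = r1 | (11 << 1) | 1; r2 corrupted
 */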