@ proc-arm940.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/proc-arm940.S: utility functions for ARM940T
 *
 *  Copyright (C) 2004-2006 Hyok S. Choi ([email protected])
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
 * ARM940T has a 4KB DCache comprising 256 lines of 4 words
 * (4 segments x 64 entries x 16-byte lines).
 */
#define CACHE_DLINESIZE	16
#define CACHE_DSEGMENTS	4
#define CACHE_DENTRIES	64

	.text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required on the ARM940T (no MMU, nothing to set up),
 * so both entry points are simple returns.
 */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
	ret	lr
/*
 * cpu_arm940_proc_fin()
 *
 * Prepare the CPU for shutdown: turn off both caches by clearing
 * the I-cache and D-cache enable bits in the CP15 control register.
 */
ENTRY(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ read ctrl register
	bic	r0, r0, #0x00001000		@ clear I-cache enable bit
	bic	r0, r0, #0x00000004		@ clear D-cache enable bit
	mcr	p15, 0, r0, c1, c0, 0		@ write back: caches disabled
	ret	lr
  38. /*
  39. * cpu_arm940_reset(loc)
  40. * Params : r0 = address to jump to
  41. * Notes : This sets up everything for a reset
  42. */
  43. .pushsection .idmap.text, "ax"
  44. ENTRY(cpu_arm940_reset)
  45. mov ip, #0
  46. mcr p15, 0, ip, c7, c5, 0 @ flush I cache
  47. mcr p15, 0, ip, c7, c6, 0 @ flush D cache
  48. mcr p15, 0, ip, c7, c10, 4 @ drain WB
  49. mrc p15, 0, ip, c1, c0, 0 @ ctrl register
  50. bic ip, ip, #0x00000005 @ .............c.p
  51. bic ip, ip, #0x00001000 @ i-cache
  52. mcr p15, 0, ip, c1, c0, 0 @ ctrl register
  53. ret r0
  54. ENDPROC(cpu_arm940_reset)
  55. .popsection
/*
 * cpu_arm940_do_idle()
 *
 * Idle the CPU via the CP15 "wait for interrupt" operation
 * (c7, c0, 4); execution resumes on the next interrupt.
 */
	.align	5
ENTRY(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr
/*
 * flush_icache_all()
 *
 * Unconditionally invalidate the entire icache (the I-cache holds
 * no dirty data, so invalidation is all that is required).
 */
ENTRY(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm940_flush_icache_all)
  73. /*
  74. * flush_user_cache_all()
  75. */
  76. ENTRY(arm940_flush_user_cache_all)
  77. /* FALLTHROUGH */
  78. /*
  79. * flush_kern_cache_all()
  80. *
  81. * Clean and invalidate the entire cache.
  82. */
  83. ENTRY(arm940_flush_kern_cache_all)
  84. mov r2, #VM_EXEC
  85. /* FALLTHROUGH */
  86. /*
  87. * flush_user_cache_range(start, end, flags)
  88. *
  89. * There is no efficient way to flush a range of cache entries
  90. * in the specified address range. Thus, flushes all.
  91. *
  92. * - start - start address (inclusive)
  93. * - end - end address (exclusive)
  94. * - flags - vm_flags describing address space
  95. */
  96. ENTRY(arm940_flush_user_cache_range)
  97. mov ip, #0
  98. #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
  99. mcr p15, 0, ip, c7, c6, 0 @ flush D cache
  100. #else
  101. mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
  102. 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
  103. 2: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index
  104. subs r3, r3, #1 << 26
  105. bcs 2b @ entries 63 to 0
  106. subs r1, r1, #1 << 4
  107. bcs 1b @ segments 3 to 0
  108. #endif
  109. tst r2, #VM_EXEC
  110. mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
  111. mcrne p15, 0, ip, c7, c10, 4 @ drain WB
  112. ret lr
  113. /*
  114. * coherent_kern_range(start, end)
  115. *
  116. * Ensure coherency between the Icache and the Dcache in the
  117. * region described by start, end. If you have non-snooping
  118. * Harvard caches, you need to implement this function.
  119. *
  120. * - start - virtual start address
  121. * - end - virtual end address
  122. */
  123. ENTRY(arm940_coherent_kern_range)
  124. /* FALLTHROUGH */
  125. /*
  126. * coherent_user_range(start, end)
  127. *
  128. * Ensure coherency between the Icache and the Dcache in the
  129. * region described by start, end. If you have non-snooping
  130. * Harvard caches, you need to implement this function.
  131. *
  132. * - start - virtual start address
  133. * - end - virtual end address
  134. */
  135. ENTRY(arm940_coherent_user_range)
  136. /* FALLTHROUGH */
  137. /*
  138. * flush_kern_dcache_area(void *addr, size_t size)
  139. *
  140. * Ensure no D cache aliasing occurs, either with itself or
  141. * the I cache
  142. *
  143. * - addr - kernel address
  144. * - size - region size
  145. */
  146. ENTRY(arm940_flush_kern_dcache_area)
  147. mov r0, #0
  148. mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
  149. 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
  150. 2: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index
  151. subs r3, r3, #1 << 26
  152. bcs 2b @ entries 63 to 0
  153. subs r1, r1, #1 << 4
  154. bcs 1b @ segments 7 to 0
  155. mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
  156. mcr p15, 0, r0, c7, c10, 4 @ drain WB
  157. ret lr
/*
 * dma_inv_range(start, end)
 *
 * There is no efficient way to invalidate a specified virtual
 * address range. Thus, invalidates all.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_inv_range:
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush (invalidate) D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
  178. /*
  179. * dma_clean_range(start, end)
  180. *
  181. * There is no efficient way to clean a specifid virtual
  182. * address range. Thus, cleans all.
  183. *
  184. * - start - virtual start address
  185. * - end - virtual end address
  186. */
  187. arm940_dma_clean_range:
  188. ENTRY(cpu_arm940_dcache_clean_area)
  189. mov ip, #0
  190. #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
  191. mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
  192. 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
  193. 2: mcr p15, 0, r3, c7, c10, 2 @ clean D entry
  194. subs r3, r3, #1 << 26
  195. bcs 2b @ entries 63 to 0
  196. subs r1, r1, #1 << 4
  197. bcs 1b @ segments 7 to 0
  198. #endif
  199. mcr p15, 0, ip, c7, c10, 4 @ drain WB
  200. ret lr
/*
 * dma_flush_range(start, end)
 *
 * There is no efficient way to clean and invalidate a specified
 * virtual address range, so the whole D-cache is processed.
 * Write-through caches have nothing to clean, so they only
 * invalidate each entry.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
  226. /*
  227. * dma_map_area(start, size, dir)
  228. * - start - kernel virtual start address
  229. * - size - size of region
  230. * - dir - DMA direction
  231. */
  232. ENTRY(arm940_dma_map_area)
  233. add r1, r1, r0
  234. cmp r2, #DMA_TO_DEVICE
  235. beq arm940_dma_clean_range
  236. bcs arm940_dma_inv_range
  237. b arm940_dma_flush_range
  238. ENDPROC(arm940_dma_map_area)
  239. /*
  240. * dma_unmap_area(start, size, dir)
  241. * - start - kernel virtual start address
  242. * - size - size of region
  243. * - dir - DMA direction
  244. */
  245. ENTRY(arm940_dma_unmap_area)
  246. ret lr
  247. ENDPROC(arm940_dma_unmap_area)
  248. .globl arm940_flush_kern_cache_louis
  249. .equ arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
  250. @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
  251. define_cache_functions arm940
  252. .type __arm940_setup, #function
  253. __arm940_setup:
  254. mov r0, #0
  255. mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
  256. mcr p15, 0, r0, c7, c6, 0 @ invalidate D cache
  257. mcr p15, 0, r0, c7, c10, 4 @ drain WB
  258. mcr p15, 0, r0, c6, c3, 0 @ disable data area 3~7
  259. mcr p15, 0, r0, c6, c4, 0
  260. mcr p15, 0, r0, c6, c5, 0
  261. mcr p15, 0, r0, c6, c6, 0
  262. mcr p15, 0, r0, c6, c7, 0
  263. mcr p15, 0, r0, c6, c3, 1 @ disable instruction area 3~7
  264. mcr p15, 0, r0, c6, c4, 1
  265. mcr p15, 0, r0, c6, c5, 1
  266. mcr p15, 0, r0, c6, c6, 1
  267. mcr p15, 0, r0, c6, c7, 1
  268. mov r0, #0x0000003F @ base = 0, size = 4GB
  269. mcr p15, 0, r0, c6, c0, 0 @ set area 0, default
  270. mcr p15, 0, r0, c6, c0, 1
  271. ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
  272. ldr r7, =CONFIG_DRAM_SIZE >> 12 @ size of RAM (must be >= 4KB)
  273. pr_val r3, r0, r7, #1
  274. mcr p15, 0, r3, c6, c1, 0 @ set area 1, RAM
  275. mcr p15, 0, r3, c6, c1, 1
  276. ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
  277. ldr r7, =CONFIG_FLASH_SIZE @ size of FLASH (must be >= 4KB)
  278. pr_val r3, r0, r6, #1
  279. mcr p15, 0, r3, c6, c2, 0 @ set area 2, ROM/FLASH
  280. mcr p15, 0, r3, c6, c2, 1
  281. mov r0, #0x06
  282. mcr p15, 0, r0, c2, c0, 0 @ Region 1&2 cacheable
  283. mcr p15, 0, r0, c2, c0, 1
  284. #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
  285. mov r0, #0x00 @ disable whole write buffer
  286. #else
  287. mov r0, #0x02 @ Region 1 write bufferred
  288. #endif
  289. mcr p15, 0, r0, c3, c0, 0
  290. mov r0, #0x10000
  291. sub r0, r0, #1 @ r0 = 0xffff
  292. mcr p15, 0, r0, c5, c0, 0 @ all read/write access
  293. mcr p15, 0, r0, c5, c0, 1
  294. mrc p15, 0, r0, c1, c0 @ get control register
  295. orr r0, r0, #0x00001000 @ I-cache
  296. orr r0, r0, #0x00000005 @ MPU/D-cache
  297. ret lr
  298. .size __arm940_setup, . - __arm940_setup
	__INITDATA

@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm940_name, "ARM940T"

	.align

	@ CPU match entry consumed by the boot-time processor lookup.
	.section ".proc.info.init", "a"

	.type	__arm940_proc_info,#object
__arm940_proc_info:
	.long	0x41009400			@ CPU id value
	.long	0xff00fff0			@ CPU id mask
	.long	0
	initfn	__arm940_setup, __arm940_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm940_name
	.long	arm940_processor_functions
	.long	0
	.long	0
	.long	arm940_cache_fns
	.size	__arm940_proc_info, . - __arm940_proc_info