/* arch/powerpc/kernel/head_book3s_32.S */

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 *  Rewritten by Cort Dougan ([email protected]) for PReP
 *  Copyright (C) 1996 Cort Dougan <[email protected]>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *  Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek ([email protected]).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 */

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/interrupt.h>

#include "head_32.h"
  35. #define LOAD_BAT(n, reg, RA, RB) \
  36. /* see the comment for clear_bats() -- Cort */ \
  37. li RA,0; \
  38. mtspr SPRN_IBAT##n##U,RA; \
  39. mtspr SPRN_DBAT##n##U,RA; \
  40. lwz RA,(n*16)+0(reg); \
  41. lwz RB,(n*16)+4(reg); \
  42. mtspr SPRN_IBAT##n##U,RA; \
  43. mtspr SPRN_IBAT##n##L,RB; \
  44. lwz RA,(n*16)+8(reg); \
  45. lwz RB,(n*16)+12(reg); \
  46. mtspr SPRN_DBAT##n##U,RA; \
  47. mtspr SPRN_DBAT##n##L,RB
  48. __HEAD
  49. _GLOBAL(_stext);
  50. /*
  51. * _start is defined this way because the XCOFF loader in the OpenFirmware
  52. * on the powermac expects the entry point to be a procedure descriptor.
  53. */
  54. _GLOBAL(_start);
  55. /*
  56. * These are here for legacy reasons, the kernel used to
  57. * need to look like a coff function entry for the pmac
  58. * but we're always started by some kind of bootloader now.
  59. * -- Cort
  60. */
  61. nop /* used by __secondary_hold on prep (mtx) and chrp smp */
  62. nop /* used by __secondary_hold on prep (mtx) and chrp smp */
  63. nop
  64. /* PMAC
  65. * Enter here with the kernel text, data and bss loaded starting at
  66. * 0, running with virtual == physical mapping.
  67. * r5 points to the prom entry point (the client interface handler
  68. * address). Address translation is turned on, with the prom
  69. * managing the hash table. Interrupts are disabled. The stack
  70. * pointer (r1) points to just below the end of the half-meg region
  71. * from 0x380000 - 0x400000, which is mapped in already.
  72. *
  73. * If we are booted from MacOS via BootX, we enter with the kernel
  74. * image loaded somewhere, and the following values in registers:
  75. * r3: 'BooX' (0x426f6f58)
  76. * r4: virtual address of boot_infos_t
  77. * r5: 0
  78. *
  79. * PREP
  80. * This is jumped to on prep systems right after the kernel is relocated
  81. * to its proper place in memory by the boot loader. The expected layout
  82. * of the regs is:
  83. * r3: ptr to residual data
  84. * r4: initrd_start or if no initrd then 0
  85. * r5: initrd_end - unused if r4 is 0
  86. * r6: Start of command line string
  87. * r7: End of command line string
  88. *
  89. * This just gets a minimal mmu environment setup so we can call
  90. * start_here() to do the real work.
  91. * -- Cort
  92. */
  93. .globl __start
  94. __start:
  95. /*
  96. * We have to do any OF calls before we map ourselves to KERNELBASE,
  97. * because OF may have I/O devices mapped into that area
  98. * (particularly on CHRP).
  99. */
  100. cmpwi 0,r5,0
  101. beq 1f
  102. #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
  103. /* find out where we are now */
  104. bcl 20,31,$+4
  105. 0: mflr r8 /* r8 = runtime addr here */
  106. addis r8,r8,(_stext - 0b)@ha
  107. addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
  108. bl prom_init
  109. #endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
  110. /* We never return. We also hit that trap if trying to boot
  111. * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
  112. trap
  113. /*
  114. * Check for BootX signature when supporting PowerMac and branch to
  115. * appropriate trampoline if it's present
  116. */
  117. #ifdef CONFIG_PPC_PMAC
  118. 1: lis r31,0x426f
  119. ori r31,r31,0x6f58
  120. cmpw 0,r3,r31
  121. bne 1f
  122. bl bootx_init
  123. trap
  124. #endif /* CONFIG_PPC_PMAC */
  125. 1: mr r31,r3 /* save device tree ptr */
  126. li r24,0 /* cpu # */
  127. /*
  128. * early_init() does the early machine identification and does
  129. * the necessary low-level setup and clears the BSS
  130. * -- Cort <[email protected]>
  131. */
  132. bl early_init
  133. /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
  134. * the physical address we are running at, returned by early_init()
  135. */
  136. bl mmu_off
  137. __after_mmu_off:
  138. bl clear_bats
  139. bl flush_tlbs
  140. bl initial_bats
  141. bl load_segment_registers
  142. bl reloc_offset
  143. bl early_hash_table
  144. #if defined(CONFIG_BOOTX_TEXT)
  145. bl setup_disp_bat
  146. #endif
  147. #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
  148. bl setup_cpm_bat
  149. #endif
  150. #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
  151. bl setup_usbgecko_bat
  152. #endif
  153. /*
  154. * Call setup_cpu for CPU 0 and initialize 6xx Idle
  155. */
  156. bl reloc_offset
  157. li r24,0 /* cpu# */
  158. bl call_setup_cpu /* Call setup_cpu for this CPU */
  159. bl reloc_offset
  160. bl init_idle_6xx
  161. /*
  162. * We need to run with _start at physical address 0.
  163. * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
  164. * the exception vectors at 0 (and therefore this copy
  165. * overwrites OF's exception vectors with our own).
  166. * The MMU is off at this point.
  167. */
  168. bl reloc_offset
  169. mr r26,r3
  170. addis r4,r3,KERNELBASE@h /* current address of _start */
  171. lis r5,PHYSICAL_START@h
  172. cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
  173. bne relocate_kernel
  174. /*
  175. * we now have the 1st 16M of ram mapped with the bats.
  176. * prep needs the mmu to be turned on here, but pmac already has it on.
  177. * this shouldn't bother the pmac since it just gets turned on again
  178. * as we jump to our code at KERNELBASE. -- Cort
  179. * Actually no, pmac doesn't have it on any more. BootX enters with MMU
  180. * off, and in other cases, we now turn it off before changing BATs above.
  181. */
  182. turn_on_mmu:
  183. mfmsr r0
  184. ori r0,r0,MSR_DR|MSR_IR|MSR_RI
  185. mtspr SPRN_SRR1,r0
  186. lis r0,start_here@h
  187. ori r0,r0,start_here@l
  188. mtspr SPRN_SRR0,r0
  189. rfi /* enables MMU */
  190. /*
  191. * We need __secondary_hold as a place to hold the other cpus on
  192. * an SMP machine, even when we are running a UP kernel.
  193. */
  194. . = 0xc0 /* for prep bootloader */
  195. li r3,1 /* MTX only has 1 cpu */
  196. .globl __secondary_hold
  197. __secondary_hold:
  198. /* tell the master we're here */
  199. stw r3,__secondary_hold_acknowledge@l(0)
  200. #ifdef CONFIG_SMP
  201. 100: lwz r4,0(0)
  202. /* wait until we're told to start */
  203. cmpw 0,r4,r3
  204. bne 100b
  205. /* our cpu # was at addr 0 - go */
  206. mr r24,r3 /* cpu # */
  207. b __secondary_start
  208. #else
  209. b .
  210. #endif /* CONFIG_SMP */
  211. .globl __secondary_hold_spinloop
  212. __secondary_hold_spinloop:
  213. .long 0
  214. .globl __secondary_hold_acknowledge
  215. __secondary_hold_acknowledge:
  216. .long -1
  217. /* System reset */
  218. /* core99 pmac starts the seconary here by changing the vector, and
  219. putting it back to what it was (unknown_async_exception) when done. */
  220. EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception)
  221. /* Machine check */
  222. /*
  223. * On CHRP, this is complicated by the fact that we could get a
  224. * machine check inside RTAS, and we have no guarantee that certain
  225. * critical registers will have the values we expect. The set of
  226. * registers that might have bad values includes all the GPRs
  227. * and all the BATs. We indicate that we are in RTAS by putting
  228. * a non-zero value, the address of the exception frame to use,
  229. * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
  230. * and uses its value if it is non-zero.
  231. * (Other exception handlers assume that r1 is a valid kernel stack
  232. * pointer when we take an exception from supervisor mode.)
  233. * -- paulus.
  234. */
  235. START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
  236. EXCEPTION_PROLOG_0
  237. #ifdef CONFIG_PPC_CHRP
  238. mtspr SPRN_SPRG_SCRATCH2,r1
  239. mfspr r1, SPRN_SPRG_THREAD
  240. lwz r1, RTAS_SP(r1)
  241. cmpwi cr1, r1, 0
  242. bne cr1, 7f
  243. mfspr r1, SPRN_SPRG_SCRATCH2
  244. #endif /* CONFIG_PPC_CHRP */
  245. EXCEPTION_PROLOG_1
  246. 7: EXCEPTION_PROLOG_2 0x200 MachineCheck
  247. #ifdef CONFIG_PPC_CHRP
  248. beq cr1, 1f
  249. twi 31, 0, 0
  250. #endif
  251. 1: prepare_transfer_to_handler
  252. bl machine_check_exception
  253. b interrupt_return
  254. /* Data access exception. */
  255. START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess)
  256. #ifdef CONFIG_PPC_BOOK3S_604
  257. BEGIN_MMU_FTR_SECTION
  258. mtspr SPRN_SPRG_SCRATCH2,r10
  259. mfspr r10, SPRN_SPRG_THREAD
  260. stw r11, THR11(r10)
  261. mfspr r10, SPRN_DSISR
  262. mfcr r11
  263. andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
  264. mfspr r10, SPRN_SPRG_THREAD
  265. beq hash_page_dsi
  266. .Lhash_page_dsi_cont:
  267. mtcr r11
  268. lwz r11, THR11(r10)
  269. mfspr r10, SPRN_SPRG_SCRATCH2
  270. MMU_FTR_SECTION_ELSE
  271. b 1f
  272. ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
  273. #endif
  274. 1: EXCEPTION_PROLOG_0 handle_dar_dsisr=1
  275. EXCEPTION_PROLOG_1
  276. EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
  277. prepare_transfer_to_handler
  278. lwz r5, _DSISR(r1)
  279. andis. r0, r5, DSISR_DABRMATCH@h
  280. bne- 1f
  281. bl do_page_fault
  282. b interrupt_return
  283. 1: bl do_break
  284. REST_NVGPRS(r1)
  285. b interrupt_return
  286. /* Instruction access exception. */
  287. START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess)
  288. mtspr SPRN_SPRG_SCRATCH0,r10
  289. mtspr SPRN_SPRG_SCRATCH1,r11
  290. mfspr r10, SPRN_SPRG_THREAD
  291. mfspr r11, SPRN_SRR0
  292. stw r11, SRR0(r10)
  293. mfspr r11, SPRN_SRR1 /* check whether user or kernel */
  294. stw r11, SRR1(r10)
  295. mfcr r10
  296. #ifdef CONFIG_PPC_BOOK3S_604
  297. BEGIN_MMU_FTR_SECTION
  298. andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
  299. bne hash_page_isi
  300. .Lhash_page_isi_cont:
  301. mfspr r11, SPRN_SRR1 /* check whether user or kernel */
  302. END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
  303. #endif
  304. andi. r11, r11, MSR_PR
  305. EXCEPTION_PROLOG_1
  306. EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess
  307. andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
  308. stw r5, _DSISR(r11)
  309. stw r12, _DAR(r11)
  310. prepare_transfer_to_handler
  311. bl do_page_fault
  312. b interrupt_return
  313. /* External interrupt */
  314. EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)
  315. /* Alignment exception */
  316. START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
  317. EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
  318. prepare_transfer_to_handler
  319. bl alignment_exception
  320. REST_NVGPRS(r1)
  321. b interrupt_return
  322. /* Program check exception */
  323. START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
  324. EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
  325. prepare_transfer_to_handler
  326. bl program_check_exception
  327. REST_NVGPRS(r1)
  328. b interrupt_return
  329. /* Floating-point unavailable */
  330. START_EXCEPTION(0x800, FPUnavailable)
  331. #ifdef CONFIG_PPC_FPU
  332. BEGIN_FTR_SECTION
  333. /*
  334. * Certain Freescale cores don't have a FPU and treat fp instructions
  335. * as a FP Unavailable exception. Redirect to illegal/emulation handling.
  336. */
  337. b ProgramCheck
  338. END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
  339. EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable
  340. beq 1f
  341. bl load_up_fpu /* if from user, just load it up */
  342. b fast_exception_return
  343. 1: prepare_transfer_to_handler
  344. bl kernel_fp_unavailable_exception
  345. b interrupt_return
  346. #else
  347. b ProgramCheck
  348. #endif
  349. /* Decrementer */
  350. EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)
  351. EXCEPTION(0xa00, Trap_0a, unknown_exception)
  352. EXCEPTION(0xb00, Trap_0b, unknown_exception)
  353. /* System call */
  354. START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
  355. SYSCALL_ENTRY INTERRUPT_SYSCALL
  356. EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
  357. EXCEPTION(0xe00, Trap_0e, unknown_exception)
  358. /*
  359. * The Altivec unavailable trap is at 0x0f20. Foo.
  360. * We effectively remap it to 0x3000.
  361. * We include an altivec unavailable exception vector even if
  362. * not configured for Altivec, so that you can't panic a
  363. * non-altivec kernel running on a machine with altivec just
  364. * by executing an altivec instruction.
  365. */
  366. START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap)
  367. b PerformanceMonitor
  368. START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap)
  369. b AltiVecUnavailable
  370. __HEAD
  371. /*
  372. * Handle TLB miss for instruction on 603/603e.
  373. * Note: we get an alternate set of r0 - r3 to use automatically.
  374. */
  375. . = INTERRUPT_INST_TLB_MISS_603
  376. InstructionTLBMiss:
  377. /*
  378. * r0: scratch
  379. * r1: linux style pte ( later becomes ppc hardware pte )
  380. * r2: ptr to linux-style pte
  381. * r3: scratch
  382. */
  383. /* Get PTE (linux-style) and check access */
  384. mfspr r3,SPRN_IMISS
  385. #ifdef CONFIG_MODULES
  386. lis r1, TASK_SIZE@h /* check if kernel address */
  387. cmplw 0,r1,r3
  388. #endif
  389. mfspr r2, SPRN_SDR1
  390. li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
  391. rlwinm r2, r2, 28, 0xfffff000
  392. #ifdef CONFIG_MODULES
  393. bgt- 112f
  394. lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
  395. li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
  396. addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
  397. #endif
  398. 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
  399. lwz r2,0(r2) /* get pmd entry */
  400. rlwinm. r2,r2,0,0,19 /* extract address of pte page */
  401. beq- InstructionAddressInvalid /* return if no mapping */
  402. rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
  403. lwz r0,0(r2) /* get linux-style pte */
  404. andc. r1,r1,r0 /* check access & ~permission */
  405. bne- InstructionAddressInvalid /* return if access not permitted */
  406. /* Convert linux-style PTE to low word of PPC-style PTE */
  407. rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
  408. ori r1, r1, 0xe06 /* clear out reserved bits */
  409. andc r1, r0, r1 /* PP = user? 1 : 0 */
  410. BEGIN_FTR_SECTION
  411. rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
  412. END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
  413. mtspr SPRN_RPA,r1
  414. tlbli r3
  415. mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
  416. mtcrf 0x80,r3
  417. rfi
  418. InstructionAddressInvalid:
  419. mfspr r3,SPRN_SRR1
  420. rlwinm r1,r3,9,6,6 /* Get load/store bit */
  421. addis r1,r1,0x2000
  422. mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
  423. andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
  424. or r2,r2,r1
  425. mtspr SPRN_SRR1,r2
  426. mfspr r1,SPRN_IMISS /* Get failing address */
  427. rlwinm. r2,r2,0,31,31 /* Check for little endian access */
  428. rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
  429. xor r1,r1,r2
  430. mtspr SPRN_DAR,r1 /* Set fault address */
  431. mfmsr r0 /* Restore "normal" registers */
  432. xoris r0,r0,MSR_TGPR>>16
  433. mtcrf 0x80,r3 /* Restore CR0 */
  434. mtmsr r0
  435. b InstructionAccess
  436. /*
  437. * Handle TLB miss for DATA Load operation on 603/603e
  438. */
  439. . = INTERRUPT_DATA_LOAD_TLB_MISS_603
  440. DataLoadTLBMiss:
  441. /*
  442. * r0: scratch
  443. * r1: linux style pte ( later becomes ppc hardware pte )
  444. * r2: ptr to linux-style pte
  445. * r3: scratch
  446. */
  447. /* Get PTE (linux-style) and check access */
  448. mfspr r3,SPRN_DMISS
  449. lis r1, TASK_SIZE@h /* check if kernel address */
  450. cmplw 0,r1,r3
  451. mfspr r2, SPRN_SDR1
  452. li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
  453. rlwinm r2, r2, 28, 0xfffff000
  454. bgt- 112f
  455. lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
  456. li r1, _PAGE_PRESENT | _PAGE_ACCESSED
  457. addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
  458. 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
  459. lwz r2,0(r2) /* get pmd entry */
  460. rlwinm. r2,r2,0,0,19 /* extract address of pte page */
  461. beq- DataAddressInvalid /* return if no mapping */
  462. rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
  463. lwz r0,0(r2) /* get linux-style pte */
  464. andc. r1,r1,r0 /* check access & ~permission */
  465. bne- DataAddressInvalid /* return if access not permitted */
  466. /* Convert linux-style PTE to low word of PPC-style PTE */
  467. rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
  468. rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
  469. rlwimi r1,r0,32-3,24,24 /* _PAGE_RW -> _PAGE_DIRTY */
  470. rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
  471. xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */
  472. ori r1,r1,0xe04 /* clear out reserved bits */
  473. andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
  474. BEGIN_FTR_SECTION
  475. rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
  476. END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
  477. mtspr SPRN_RPA,r1
  478. BEGIN_MMU_FTR_SECTION
  479. li r0,1
  480. mfspr r1,SPRN_SPRG_603_LRU
  481. rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
  482. slw r0,r0,r2
  483. xor r1,r0,r1
  484. srw r0,r1,r2
  485. mtspr SPRN_SPRG_603_LRU,r1
  486. mfspr r2,SPRN_SRR1
  487. rlwimi r2,r0,31-14,14,14
  488. mtspr SPRN_SRR1,r2
  489. mtcrf 0x80,r2
  490. tlbld r3
  491. rfi
  492. MMU_FTR_SECTION_ELSE
  493. mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
  494. mtcrf 0x80,r2
  495. tlbld r3
  496. rfi
  497. ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
  498. DataAddressInvalid:
  499. mfspr r3,SPRN_SRR1
  500. rlwinm r1,r3,9,6,6 /* Get load/store bit */
  501. addis r1,r1,0x2000
  502. mtspr SPRN_DSISR,r1
  503. andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
  504. mtspr SPRN_SRR1,r2
  505. mfspr r1,SPRN_DMISS /* Get failing address */
  506. rlwinm. r2,r2,0,31,31 /* Check for little endian access */
  507. beq 20f /* Jump if big endian */
  508. xori r1,r1,3
  509. 20: mtspr SPRN_DAR,r1 /* Set fault address */
  510. mfmsr r0 /* Restore "normal" registers */
  511. xoris r0,r0,MSR_TGPR>>16
  512. mtcrf 0x80,r3 /* Restore CR0 */
  513. mtmsr r0
  514. b DataAccess
  515. /*
  516. * Handle TLB miss for DATA Store on 603/603e
  517. */
  518. . = INTERRUPT_DATA_STORE_TLB_MISS_603
  519. DataStoreTLBMiss:
  520. /*
  521. * r0: scratch
  522. * r1: linux style pte ( later becomes ppc hardware pte )
  523. * r2: ptr to linux-style pte
  524. * r3: scratch
  525. */
  526. /* Get PTE (linux-style) and check access */
  527. mfspr r3,SPRN_DMISS
  528. lis r1, TASK_SIZE@h /* check if kernel address */
  529. cmplw 0,r1,r3
  530. mfspr r2, SPRN_SDR1
  531. li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
  532. rlwinm r2, r2, 28, 0xfffff000
  533. bgt- 112f
  534. lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
  535. li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
  536. addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
  537. 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
  538. lwz r2,0(r2) /* get pmd entry */
  539. rlwinm. r2,r2,0,0,19 /* extract address of pte page */
  540. beq- DataAddressInvalid /* return if no mapping */
  541. rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
  542. lwz r0,0(r2) /* get linux-style pte */
  543. andc. r1,r1,r0 /* check access & ~permission */
  544. bne- DataAddressInvalid /* return if access not permitted */
  545. /* Convert linux-style PTE to low word of PPC-style PTE */
  546. rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
  547. li r1,0xe06 /* clear out reserved bits & PP msb */
  548. andc r1,r0,r1 /* PP = user? 1: 0 */
  549. BEGIN_FTR_SECTION
  550. rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
  551. END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
  552. mtspr SPRN_RPA,r1
  553. mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
  554. mtcrf 0x80,r2
  555. BEGIN_MMU_FTR_SECTION
  556. li r0,1
  557. mfspr r1,SPRN_SPRG_603_LRU
  558. rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
  559. slw r0,r0,r2
  560. xor r1,r0,r1
  561. srw r0,r1,r2
  562. mtspr SPRN_SPRG_603_LRU,r1
  563. mfspr r2,SPRN_SRR1
  564. rlwimi r2,r0,31-14,14,14
  565. mtspr SPRN_SRR1,r2
  566. mtcrf 0x80,r2
  567. tlbld r3
  568. rfi
  569. MMU_FTR_SECTION_ELSE
  570. mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
  571. mtcrf 0x80,r2
  572. tlbld r3
  573. rfi
  574. ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
  575. #ifndef CONFIG_ALTIVEC
  576. #define altivec_assist_exception unknown_exception
  577. #endif
  578. #ifndef CONFIG_TAU_INT
  579. #define TAUException unknown_async_exception
  580. #endif
  581. EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception)
  582. EXCEPTION(0x1400, SMI, SMIException)
  583. EXCEPTION(0x1500, Trap_15, unknown_exception)
  584. EXCEPTION(0x1600, Trap_16, altivec_assist_exception)
  585. EXCEPTION(0x1700, Trap_17, TAUException)
  586. EXCEPTION(0x1800, Trap_18, unknown_exception)
  587. EXCEPTION(0x1900, Trap_19, unknown_exception)
  588. EXCEPTION(0x1a00, Trap_1a, unknown_exception)
  589. EXCEPTION(0x1b00, Trap_1b, unknown_exception)
  590. EXCEPTION(0x1c00, Trap_1c, unknown_exception)
  591. EXCEPTION(0x1d00, Trap_1d, unknown_exception)
  592. EXCEPTION(0x1e00, Trap_1e, unknown_exception)
  593. EXCEPTION(0x1f00, Trap_1f, unknown_exception)
  594. EXCEPTION(0x2000, RunMode, RunModeException)
  595. EXCEPTION(0x2100, Trap_21, unknown_exception)
  596. EXCEPTION(0x2200, Trap_22, unknown_exception)
  597. EXCEPTION(0x2300, Trap_23, unknown_exception)
  598. EXCEPTION(0x2400, Trap_24, unknown_exception)
  599. EXCEPTION(0x2500, Trap_25, unknown_exception)
  600. EXCEPTION(0x2600, Trap_26, unknown_exception)
  601. EXCEPTION(0x2700, Trap_27, unknown_exception)
  602. EXCEPTION(0x2800, Trap_28, unknown_exception)
  603. EXCEPTION(0x2900, Trap_29, unknown_exception)
  604. EXCEPTION(0x2a00, Trap_2a, unknown_exception)
  605. EXCEPTION(0x2b00, Trap_2b, unknown_exception)
  606. EXCEPTION(0x2c00, Trap_2c, unknown_exception)
  607. EXCEPTION(0x2d00, Trap_2d, unknown_exception)
  608. EXCEPTION(0x2e00, Trap_2e, unknown_exception)
  609. EXCEPTION(0x2f00, Trap_2f, unknown_exception)
  610. __HEAD
  611. . = 0x3000
  612. #ifdef CONFIG_PPC_BOOK3S_604
  613. .macro save_regs_thread thread
  614. stw r0, THR0(\thread)
  615. stw r3, THR3(\thread)
  616. stw r4, THR4(\thread)
  617. stw r5, THR5(\thread)
  618. stw r6, THR6(\thread)
  619. stw r8, THR8(\thread)
  620. stw r9, THR9(\thread)
  621. mflr r0
  622. stw r0, THLR(\thread)
  623. mfctr r0
  624. stw r0, THCTR(\thread)
  625. .endm
  626. .macro restore_regs_thread thread
  627. lwz r0, THLR(\thread)
  628. mtlr r0
  629. lwz r0, THCTR(\thread)
  630. mtctr r0
  631. lwz r0, THR0(\thread)
  632. lwz r3, THR3(\thread)
  633. lwz r4, THR4(\thread)
  634. lwz r5, THR5(\thread)
  635. lwz r6, THR6(\thread)
  636. lwz r8, THR8(\thread)
  637. lwz r9, THR9(\thread)
  638. .endm
  639. hash_page_dsi:
  640. save_regs_thread r10
  641. mfdsisr r3
  642. mfdar r4
  643. mfsrr0 r5
  644. mfsrr1 r9
  645. rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
  646. bl hash_page
  647. mfspr r10, SPRN_SPRG_THREAD
  648. restore_regs_thread r10
  649. b .Lhash_page_dsi_cont
  650. hash_page_isi:
  651. mr r11, r10
  652. mfspr r10, SPRN_SPRG_THREAD
  653. save_regs_thread r10
  654. li r3, 0
  655. lwz r4, SRR0(r10)
  656. lwz r9, SRR1(r10)
  657. bl hash_page
  658. mfspr r10, SPRN_SPRG_THREAD
  659. restore_regs_thread r10
  660. mr r10, r11
  661. b .Lhash_page_isi_cont
  662. .globl fast_hash_page_return
  663. fast_hash_page_return:
  664. andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
  665. mfspr r10, SPRN_SPRG_THREAD
  666. restore_regs_thread r10
  667. bne 1f
  668. /* DSI */
  669. mtcr r11
  670. lwz r11, THR11(r10)
  671. mfspr r10, SPRN_SPRG_SCRATCH2
  672. rfi
  673. 1: /* ISI */
  674. mtcr r11
  675. mfspr r11, SPRN_SPRG_SCRATCH1
  676. mfspr r10, SPRN_SPRG_SCRATCH0
  677. rfi
  678. #endif /* CONFIG_PPC_BOOK3S_604 */
  679. #ifdef CONFIG_VMAP_STACK
  680. vmap_stack_overflow_exception
  681. #endif
  682. __HEAD
  683. AltiVecUnavailable:
  684. EXCEPTION_PROLOG 0xf20 AltiVecUnavailable
  685. #ifdef CONFIG_ALTIVEC
  686. beq 1f
  687. bl load_up_altivec /* if from user, just load it up */
  688. b fast_exception_return
  689. #endif /* CONFIG_ALTIVEC */
  690. 1: prepare_transfer_to_handler
  691. bl altivec_unavailable_exception
  692. b interrupt_return
  693. __HEAD
  694. PerformanceMonitor:
  695. EXCEPTION_PROLOG 0xf00 PerformanceMonitor
  696. prepare_transfer_to_handler
  697. bl performance_monitor_exception
  698. b interrupt_return
  699. __HEAD
  700. /*
  701. * This code is jumped to from the startup code to copy
  702. * the kernel image to physical address PHYSICAL_START.
  703. */
  704. relocate_kernel:
  705. lis r3,PHYSICAL_START@h /* Destination base address */
  706. li r6,0 /* Destination offset */
  707. li r5,0x4000 /* # bytes of memory to copy */
  708. bl copy_and_flush /* copy the first 0x4000 bytes */
  709. addi r0,r3,4f@l /* jump to the address of 4f */
  710. mtctr r0 /* in copy and do the rest. */
  711. bctr /* jump to the copy */
  712. 4: lis r5,_end-KERNELBASE@h
  713. ori r5,r5,_end-KERNELBASE@l
  714. bl copy_and_flush /* copy the rest */
  715. b turn_on_mmu
  716. /*
  717. * Copy routine used to copy the kernel to start at physical address 0
  718. * and flush and invalidate the caches as needed.
  719. * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
  720. * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
  721. */
  722. _GLOBAL(copy_and_flush)
  723. addi r5,r5,-4
  724. addi r6,r6,-4
  725. 4: li r0,L1_CACHE_BYTES/4
  726. mtctr r0
  727. 3: addi r6,r6,4 /* copy a cache line */
  728. lwzx r0,r6,r4
  729. stwx r0,r6,r3
  730. bdnz 3b
  731. dcbst r6,r3 /* write it to memory */
  732. sync
  733. icbi r6,r3 /* flush the icache line */
  734. cmplw 0,r6,r5
  735. blt 4b
  736. sync /* additional sync needed on g4 */
  737. isync
  738. addi r5,r5,4
  739. addi r6,r6,4
  740. blr
#ifdef CONFIG_SMP
	/*
	 * Entry point for secondary CPUs on MPC86xx boards: report our
	 * presence to the boot CPU, record our CPU number, and fall
	 * through to the common secondary startup path.
	 */
	.globl	__secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR		/* processor id from PIR */
	stw	r3, __secondary_hold_acknowledge@l(0)	/* tell boot CPU we're here */
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0			/* entry for cpu 0 */
	b	1f
	li	r24,1			/* entry for cpu 1 */
	b	1f
	li	r24,2			/* entry for cpu 2 */
	b	1f
	li	r24,3			/* entry for cpu 3 (falls through) */
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	mtmsr	r0
	isync

	/*
	 * Common secondary-CPU startup. Runs with MMU off (real mode);
	 * r24 = this CPU's number. Sets up per-CPU state and the MMU,
	 * then rfi's into start_secondary with translation enabled.
	 */
	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h	/* r3 = reloc offset (phys - virt) */
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx

	/* get current's stack and current */
	lis	r2,secondary_current@ha
	tophys(r2,r2)
	lwz	r2,secondary_current@l(r2)
	tophys(r1,r2)
	lwz	r1,TASK_STACK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)		/* terminate the back-chain */

	/* load up the MMU */
	bl	load_segment_registers
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4
BEGIN_MMU_FTR_SECTION
	/* software page tables only: point SDR1 at swapper_pg_dir */
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi				/* turns the MMU on and jumps */
#endif /* CONFIG_SMP */
  806. #ifdef CONFIG_KVM_BOOK3S_HANDLER
  807. #include "../kvm/book3s_rmhandlers.S"
  808. #endif
/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
/*
 * Point SDR1 at the early (boot-time) hash table. Flushes all TLB
 * entries first so no stale translations reference the old table.
 */
early_hash_table:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */

	/* Load the SDR1 register (hash table base & size) */
	lis	r6, early_hash - PAGE_OFFSET@h	/* physical address of early_hash */
	ori	r6, r6, 3	/* 256kB table */
	mtspr	SPRN_SDR1, r6
	blr
/*
 * Load the final MMU state prepared by MMU_init: the SDR1 hash-table
 * pointer (hash-MMU CPUs only) and the BAT registers. Must be called
 * in real mode (IR=0, DR=0); does not enable translation itself.
 */
load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
BEGIN_MMU_FTR_SECTION
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)		/* access the variable via its phys address */
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)

	/* Load the BAT registers with the values set up by MMU_init. */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	/* BATs 4-7 exist only on CPUs with the high-BATs feature */
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr
/*
 * Initialize all 16 segment registers for kernel context 0.
 * User segments get VSIDs starting at 0 (optionally marked no-execute
 * when KUEP is enabled); kernel segments continue the VSID sequence
 * with Kp=1 so user-mode accesses are protected.
 * Clobbers r0, r3, r4, ctr.
 */
_GLOBAL(load_segment_registers)
	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
	mtctr	r0		/* for context 0 */
#ifdef CONFIG_PPC_KUEP
	lis	r3, SR_NX@h	/* Kp = 0, Ks = 0, VSID = 0 */
#else
	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
#endif
	li	r4, 0		/* effective address of segment 0 */
3:	mtsrin	r3, r4		/* set SR for the segment containing r4 */
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b

	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	blr
/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
BEGIN_MMU_FTR_SECTION
	/* software page tables only: point SDR1 at swapper_pg_dir */
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)	/* terminate back-chain */
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31		/* NOTE(review): r31 presumably carries the
				 * device-tree pointer from early entry code
				 * outside this chunk — confirm */
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
	bl	MMU_init_hw_patch

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)	/* MMU off */
	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* continue at 2: in real mode */
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* enable MMU and jump to start_kernel */
/*
 * An undocumented "feature" of 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear bat3 and 4 so
 * this makes sure it's done.
 *  -- Cort
 */
/*
 * Zero every instruction and data BAT register (BATs 4-7 only on
 * CPUs with the high-BATs feature). Clobbers r10.
 */
clear_bats:
	li	r10,0

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr
/*
 * Reload all BAT registers from the BATS array while running in real
 * mode: drop to MMU-off via rfi (with RI and EE cleared so a stray
 * interrupt can't hit us mid-update), clear then reload the BATs, and
 * rfi back to the caller with the original MSR.
 * Clobbers r0, r3, r4, r5, r6, r7, ctr/lr state per clear_bats.
 */
_GLOBAL(update_bats)
	lis	r4, 1f@h
	ori	r4, r4, 1f@l
	tophys(r4, r4)		/* real-mode address of label 1 */
	mfmsr	r6		/* save MSR to restore on exit */
	mflr	r7		/* save return address */
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	rlwinm	r0, r6, 0, ~MSR_RI	/* non-recoverable while BATs change */
	rlwinm	r0, r0, 0, ~MSR_EE	/* and no external interrupts */
	mtmsr	r0
	.align	4
	mtspr	SPRN_SRR0, r4
	mtspr	SPRN_SRR1, r3
	rfi			/* continue at 1: with MMU off */
1:	bl	clear_bats	/* v bits must be clear before rewriting */
	lis	r3, BATS@ha
	addi	r3, r3, BATS@l
	tophys(r3, r3)
	LOAD_BAT(0, r3, r4, r5)
	LOAD_BAT(1, r3, r4, r5)
	LOAD_BAT(2, r3, r4, r5)
	LOAD_BAT(3, r3, r4, r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4, r3, r4, r5)
	LOAD_BAT(5, r3, r4, r5)
	LOAD_BAT(6, r3, r4, r5)
	LOAD_BAT(7, r3, r4, r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	mtmsr	r3
	mtspr	SPRN_SRR0, r7	/* return address */
	mtspr	SPRN_SRR1, r6	/* original MSR */
	rfi			/* back to caller, MMU state restored */
/*
 * Invalidate TLB entries for effective addresses 0 .. 0x400000-0x1000,
 * one 4kB page at a time, counting down. Clobbers r10 and cr0.
 */
flush_tlbs:
	lis	r10, 0x40	/* start just past 0x400000 */
1:	addic.	r10, r10, -0x1000	/* previous page; sets cr0 */
	tlbie	r10
	bgt	1b		/* until r10 reaches 0 */
	sync			/* wait for the tlbie's to complete */
	blr
/*
 * Turn address translation off and continue at __after_mmu_off.
 * NOTE(review): r3 appears to hold the runtime (physical) address of
 * _start here, set up by the caller — confirm against the entry code
 * outside this chunk. Returns immediately if the MMU is already off.
 */
mmu_off:
 	addi	r4, r3, __after_mmu_off - _start	/* phys addr to resume at */
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr					/* already off: plain return */
	andc	r3,r3,r0			/* clear IR and DR in new MSR */
	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfi					/* jump to r4 with MMU off */
/* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
/*
 * Set up IBAT0/DBAT0 to map the first 256MB of RAM at PAGE_OFFSET
 * so the kernel can run virtually before the real MMU state is
 * loaded. Clobbers r8, r11.
 */
initial_bats:
	lis	r11,PAGE_OFFSET@h	/* effective address (upper BAT) */
	tophys(r8,r11)			/* physical address (lower BAT) */
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr
#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 * Loads the upper/lower DBAT3 values from the two-word disp_BAT
	 * array; does nothing if disp_BAT is zero/unset.
	 * Clobbers r3 (via reloc_offset), r8, r11.
	 */
	mflr	r8			/* reloc_offset clobbers lr */
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha	/* relocated address of disp_BAT */
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0
	beqlr				/* nothing prepared: return */
	lwz	r11,0(r8)		/* upper BAT value */
	lwz	r8,4(r8)		/* lower BAT value */
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
/*
 * Map 1MB at 0xf0000000 (1:1, uncached/guarded, R/W) through DBAT1
 * for early CPM debug console output. Clobbers r8, r11.
 */
setup_cpm_bat:
	lis	r8, 0xf000		/* physical base */
	ori	r8, r8,	0x002a		/* uncached, guarded, R/W */
	mtspr	SPRN_DBAT1L,	r8

	lis	r11, 0xf000		/* effective base (1:1 mapping) */
	ori	r11,	r11, (BL_1M << 2) | 2	/* 1MB, Vs=1 */
	mtspr	SPRN_DBAT1U,	r11

	blr
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat:
	/* prepare a BAT for early io */
	/* Maps 128K of the platform's EXI I/O area (uncached, guarded,
	 * R/W) at the top of the address space via DBAT1 for early
	 * USB Gecko debug output. Clobbers r8, r11. */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00		/* GameCube EXI physical base */
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00		/* Wii EXI physical base */
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded ,rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif
  1113. .data