misc_64.S

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Largely rewritten by Cort Dougan ([email protected])
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan ([email protected])
 * PPC64 updates by Dave Engebretsen ([email protected])
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

	.text
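/*
 * __bswapdi2 is the libgcc-style 64-bit byte-reversal helper.  Each
 * 32-bit half of r3 is byte-swapped with the rlwinm/rlwimi sequence
 * below, the swapped low half is shifted into the high word, and the
 * two halves are OR-ed together, so the value returned in r3 has its
 * eight bytes in reverse order
 * (e.g. 0x0102030405060708 becomes 0x0807060504030201).
 */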
_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
	srdi	r8,r3,32
	rlwinm	r7,r3,8,0xffffffff
	rlwimi	r7,r3,24,0,7
	rlwinm	r9,r8,8,0xffffffff
	rlwimi	r7,r3,24,16,23
	rlwimi	r9,r8,24,0,7
	rlwimi	r9,r8,24,16,23
	sldi	r7,r7,32
	or	r3,r7,r9
	blr

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
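/*
 * rmci_on/rmci_off toggle a HID4 bit (0x100 rotated into the upper
 * word, i.e. 0x0000010000000000) on the 970, which appears to be the
 * real-mode cache-inhibit control used by the early BootX debug
 * console.  The sync/isync/slbia sequence around the mtspr follows
 * the usual 970 requirements for updating HID registers.
 */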
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr

_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
 * Do an IO access in real mode
 */
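/*
 * Calling convention (as used by the code below): r3 holds the
 * physical address of the device register and the byte read is
 * returned in r3.  MSR[DR] is turned off and the same HID4 bit that
 * rmci_on sets above (presumably real-mode cache inhibit) is set for
 * the duration of the access; both MSR and HID4 are restored before
 * returning.
 */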
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
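/*
 * Calling convention: r3 holds the byte to write and r4 the physical
 * address of the device register; the MSR/HID4 handling mirrors
 * real_readb above.
 */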
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */

#ifdef CONFIG_PPC_PASEMI
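/*
 * real_205_readb/real_205_writeb do the same real-mode device access
 * as above but use the cache-inhibited byte load/store instructions
 * (LBZCIX/STBCIX, introduced with Power ISA 2.05, hence the _205_ in
 * the name), so no HID4 manipulation is needed.  For the read r3 is
 * the physical address; for the write r3 is the value and r4 the
 * physical address.
 */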
_GLOBAL(real_205_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	LBZCIX(R3,R0,R3)
	isync
	mtmsrd	r7
	sync
	isync
	blr

_GLOBAL(real_205_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	STBCIX(R3,R0,R4)
	isync
	mtmsrd	r7
	sync
	isync
	blr
#endif /* CONFIG_PPC_PASEMI */

#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24-bit register address. This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
 */
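/*
 * SCOMC command word as built below: the 24-bit SCOM address is
 * shifted left 8 bits with the low byte (parity) cleared; bit 0x8000
 * is the read/write flag, set for a read and left clear for a write.
 * The value read back from SPRN_SCOMC would carry the status bits,
 * which this code deliberately ignores.
 */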
_GLOBAL(scom970_read)
	/* interrupts off */
	mfmsr	r4
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate the 24-bit SCOM address 8 bits left and mask out its
	 * low 8 bits (including parity). On current CPUs they must be
	 * 0'd, and finally or in the RW bit
	 */
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000

	/* do the actual scom read */
	sync
	mtspr	SPRN_SCOMC,r3
	isync
	mfspr	r3,SPRN_SCOMD
	isync
	mfspr	r0,SPRN_SCOMC
	isync

	/* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
	 * that's the best we can do). Not implemented yet as we don't use
	 * the scom on any of the bogus CPUs yet, but may have to be done
	 * ultimately
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr
_GLOBAL(scom970_write)
	/* interrupts off */
	mfmsr	r5
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate the 24-bit SCOM address 8 bits left and mask out its
	 * low 8 bits (including parity). On current CPUs they must be 0'd.
	 */
	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4	/* write data */
	isync
	mtspr	SPRN_SCOMC,r3	/* write command */
	isync
	mfspr	r3,SPRN_SCOMC
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */

/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
 * the slave code for the next one is at addresses 0 to 0x100.
 *
 * This is used by all slaves, even those that did not find a matching
 * paca in the secondary startup code.
 *
 * Physical (hardware) cpu id should be in r3.
 */
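/*
 * The bcl 20,31,$+4 / mflr pair below obtains the routine's runtime
 * address so kexec_flag can be addressed position-independently.  Once
 * the flag becomes non-zero, Book3S parts rfid to absolute address
 * 0x60 with MSR[LE] cleared, while the non-Book3S path branches
 * absolute to 0x60 (the TLB entry for low memory having been created
 * in book3e_secondary_core_init).
 */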
_GLOBAL(kexec_wait)
	bcl	20,31,$+4
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b

99:	HMT_LOW
#ifdef CONFIG_KEXEC_CORE	/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	beq	99b
#ifdef CONFIG_PPC_BOOK3S_64
	li	r10,0x60
	mfmsr	r11
	clrrdi	r11,r11,1	/* Clear MSR_LE */
	mtsrr0	r10
	mtsrr1	r11
	rfid
#else
	/* Create TLB entry in book3e_secondary_core_init */
	li	r4,0
	ba	0x60
#endif
#endif

/* this can be in text because we won't change it until we are
 * running in real mode anyway
 */
kexec_flag:
	.long	0

#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC_BOOK3E_64
/*
 * BOOK3E has no real MMU mode, so we have to setup the initial TLB
 * for a core to identity map v:0 to p:0. This current implementation
 * assumes that 1G is enough for kexec.
 */
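/*
 * The code below picks the last entry of TLB1 (so it cannot collide
 * with the entry covering the kernel text), marks it VALID and IPROT
 * with a 1GB page size, and maps effective address 0 to real address 0
 * with supervisor read/write/execute permissions.  MAS7, which holds
 * the upper bits of the real page number, is cleared.
 */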
kexec_create_tlb:
	/*
	 * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
	 * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
	 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

	mfspr	r10,SPRN_TLB1CFG
	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
	subi	r10,r10,1		/* Last entry: no conflict with kernel text */
	lis	r9,MAS0_TLBSEL(1)@h
	rlwimi	r9,r10,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r9) */

	/* Set up a temp identity mapping v:0 to p:0 and return to it. */
	mtspr	SPRN_MAS0,r9
	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r9
	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS2_M_IF_NEEDED)
	mtspr	SPRN_MAS2,r9
	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
	mtspr	SPRN_MAS3,r9
	li	r9,0
	mtspr	SPRN_MAS7,r9
	tlbwe
	isync
	blr
#endif

/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * switch to real mode
 * mark the paca as no longer used
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)
	bl	real_mode

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)

	b	kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
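/*
 * On Book3S the sequence below first clears MSR[RI] with mtmsrd (so an
 * interrupt taken mid-switch is flagged as unrecoverable), then loads
 * SRR0 with the caller's link register and SRR1 with the current MSR
 * minus IR/DR; the rfid both disables translation and returns to the
 * caller in one step.
 */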
real_mode:	/* assume normal blr return */
#ifdef CONFIG_PPC_BOOK3E_64
	/* Create an identity mapping. */
	b	kexec_create_tlb
#else
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9
	andc	r10,r12,r10

	mtmsrd	r9,1
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid
#endif

/*
 * kexec_sequence(newstack, start, image, control, clear_all(),
 *		  copy_with_mmu_off)
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */
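/*
 * The C-side declaration is roughly (see asm/kexec.h):
 *
 *   void kexec_sequence(void *newstack, unsigned long start,
 *			 void *image, void *control,
 *			 void (*clear_all)(void), bool copy_with_mmu_off);
 *
 * Overall flow: switch to the new stack, stash the arguments in
 * non-volatile registers, disable interrupts, drop to real mode either
 * before or after copying the destination pages (depending on
 * copy_with_mmu_off), copy the first 0x100 bytes of the entry point to
 * address 0, release the secondaries via kexec_flag, optionally call
 * clear_all() to wipe the hash table, and finally jump to the new
 * kernel's entry point.
 */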
_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-STACK_FRAME_OVERHEAD-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* copy_with_mmu_off */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
#ifdef CONFIG_PPC_BOOK3E_64
	wrteei	0
#else
	mfmsr	r3
	rlwinm	r3,r3,0,17,15
	mtmsrd	r3,1
#endif

	/* We need to turn the MMU off unless we are in hash mode
	 * under a hypervisor
	 */
	cmpdi	r26,0
	beq	1f
	bl	real_mode
1:
	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	kexec_copy_flush	/* (image) */

	/* turn off mmu now if not done earlier */
	cmpdi	r26,0
	bne	1f
	bl	real_mode

	/* copy 0x100 bytes starting at start to 0 */
1:	li	r3,0
	mr	r4,r30	/* start, aka phys mem offset */
	li	r5,0x100
	li	r6,0
	bl	copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(r5)

	cmpdi	r27,0
	beq	1f

	/* clear out hardware hash page table and tlb */
#ifdef CONFIG_PPC64_ELF_ABI_V1
	ld	r12,0(r27)		/* deref function descriptor */
#else
	mr	r12,r27
#endif
	mtctr	r12
	bctrl				/* mmu_hash_ops.hpte_clear_all(void); */

/*
 *   kexec image calling is:
 *      the first 0x100 bytes of the entry point are copied to 0
 *
 *      all slaves branch to slave = 0x60 (absolute)
 *              slave(phys_cpu_id);
 *
 *      master goes to start = entry point
 *              start(phys_cpu_id, start, 0);
 *
 *
 *    a wrapper is needed to call existing kernels, here is an approximate
 *    description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */
1:	mr	r3,r25	# my phys cpu
	mr	r4,r30	# start, aka phys mem offset
	mtlr	r4
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC_CORE */