// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for power macintosh.
 *
 * We support both the old "powersurge" SMP architecture
 * and the current Core99 (G4 PowerMac) machines.
 *
 * Note that we don't support the very first rev. of
 * Apple/DayStar 2 CPUs board, the one with the funky
 * watchdog. Hopefully, none of these should be there except
 * maybe internally to Apple. I should probably still add some
 * code to detect this card though and disable SMP. --BenH.
 *
 * Support Macintosh G4 SMP by Troy Benjegerdes ([email protected])
 * and Ben Herrenschmidt <[email protected]>.
 *
 * Support for DayStar quad CPU cards
 * Copyright (C) XLR8, Inc. 1994-2000
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/cpu.h>
#include <linux/compiler.h>
#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/code-patching.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/keylargo.h>
#include <asm/pmac_low_i2c.h>
#include <asm/pmac_pfunc.h>
#include <asm/inst.h>

#include "pmac.h"

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void __secondary_start_pmac_0(void);

static void (*pmac_tb_freeze)(int freeze);
static u64 timebase;
static int tb_req;

#ifdef CONFIG_PPC_PMAC32_PSURGE

/*
 * Powersurge (old powermac SMP) support.
 */

/* Addresses for powersurge registers */
#define HAMMERHEAD_BASE		0xf8000000
#define HHEAD_CONFIG		0x90
#define HHEAD_SEC_INTR		0xc0

/* register for interrupting the primary processor on the powersurge */
/* N.B. this is actually the ethernet ROM! */
#define PSURGE_PRI_INTR		0xf3019000

/* register for storing the start address for the secondary processor */
/* N.B. this is the PCI config space address register for the 1st bridge */
#define PSURGE_START		0xf2800000

/* Daystar/XLR8 4-CPU card */
#define PSURGE_QUAD_REG_ADDR	0xf8800000

#define PSURGE_QUAD_IRQ_SET	0
#define PSURGE_QUAD_IRQ_CLR	1
#define PSURGE_QUAD_IRQ_PRIMARY	2
#define PSURGE_QUAD_CKSTOP_CTL	3
#define PSURGE_QUAD_PRIMARY_ARB	4
#define PSURGE_QUAD_BOARD_ID	6
#define PSURGE_QUAD_WHICH_CPU	7
#define PSURGE_QUAD_CKSTOP_RDBK	8
#define PSURGE_QUAD_RESET_CTL	11

#define PSURGE_QUAD_OUT(r, v)	(out_8(quad_base + ((r) << 4) + 4, (v)))
#define PSURGE_QUAD_IN(r)	(in_8(quad_base + ((r) << 4) + 4) & 0x0f)
#define PSURGE_QUAD_BIS(r, v)	(PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
#define PSURGE_QUAD_BIC(r, v)	(PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
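
/*
 * Note on the quad accessors above: the card's registers sit on a 16-byte
 * stride ((r) << 4) with the data byte at offset 4 of each slot, and reads
 * are masked down to the low nibble. BIS/BIC are simple read-modify-write
 * helpers to set or clear bits in a register.
 */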

/* virtual addresses for the above */
static volatile u8 __iomem *hhead_base;
static volatile u8 __iomem *quad_base;
static volatile u32 __iomem *psurge_pri_intr;
static volatile u8 __iomem *psurge_sec_intr;
static volatile u32 __iomem *psurge_start;

/* values for psurge_type */
#define PSURGE_NONE		-1
#define PSURGE_DUAL		0
#define PSURGE_QUAD_OKEE	1
#define PSURGE_QUAD_COTTON	2
#define PSURGE_QUAD_ICEGRASS	3

/* what sort of powersurge board we have */
static int psurge_type = PSURGE_NONE;

/* irq for secondary cpus to report */
static struct irq_domain *psurge_host;
int psurge_secondary_virq;

/*
 * Set and clear IPIs for powersurge.
 */
static inline void psurge_set_ipi(int cpu)
{
	if (psurge_type == PSURGE_NONE)
		return;
	if (cpu == 0)
		in_be32(psurge_pri_intr);
	else if (psurge_type == PSURGE_DUAL)
		out_8(psurge_sec_intr, 0);
	else
		PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
}

static inline void psurge_clr_ipi(int cpu)
{
	if (cpu > 0) {
		switch(psurge_type) {
		case PSURGE_DUAL:
			out_8(psurge_sec_intr, ~0);
			break;
		case PSURGE_NONE:
			break;
		default:
			PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
		}
	}
}
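
/*
 * To summarize: the primary is poked by reading PSURGE_PRI_INTR, a dual
 * card's secondary by writing its HHEAD_SEC_INTR register, and the quad
 * cards through per-CPU bits in the IRQ_SET/IRQ_CLR registers. Note that
 * psurge_clr_ipi() only has work to do for secondaries; nothing is done
 * here to ack the primary's interrupt.
 */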

/*
 * On powersurge (old SMP powermac architecture) we don't have
 * separate IPIs for separate messages like openpic does.  Instead
 * use the generic demux helpers
 *  -- paulus.
 */
static irqreturn_t psurge_ipi_intr(int irq, void *d)
{
	psurge_clr_ipi(smp_processor_id());
	smp_ipi_demux();

	return IRQ_HANDLED;
}

static void smp_psurge_cause_ipi(int cpu)
{
	psurge_set_ipi(cpu);
}

static int psurge_host_map(struct irq_domain *h, unsigned int virq,
			   irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);
	return 0;
}

static const struct irq_domain_ops psurge_host_ops = {
	.map	= psurge_host_map,
};

static int __init psurge_secondary_ipi_init(void)
{
	int rc = -ENOMEM;

	psurge_host = irq_domain_add_nomap(NULL, ~0, &psurge_host_ops, NULL);
	if (psurge_host)
		psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
	if (psurge_secondary_virq)
		rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
				 IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL);
	if (rc)
		pr_err("Failed to setup secondary cpu IPI\n");

	return rc;
}
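
/*
 * The secondary IPI doesn't come in through the regular PIC, so a "nomap"
 * irq domain is used: there is no fixed hwirq numbering, and
 * irq_create_direct_mapping() just hands back a free virq. The variable
 * psurge_secondary_virq is deliberately left non-static so the low-level
 * platform interrupt code can deliver to it.
 */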

/*
 * Determine whether a quad card is present. We read the board ID
 * register, force the data bus to change to something else, and read it
 * again. If it's stable, then the register probably exists (ugh !)
 */
static int __init psurge_quad_probe(void)
{
	int type;
	unsigned int i;

	type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
	if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
	    || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
		return PSURGE_DUAL;

	/* looks OK, try a slightly more rigorous test */
	/* bogus is not necessarily cacheline-aligned,
	   though I don't suppose that really matters.  -- paulus */
	for (i = 0; i < 100; i++) {
		volatile u32 bogus[8];
		bogus[(0+i)%8] = 0x00000000;
		bogus[(1+i)%8] = 0x55555555;
		bogus[(2+i)%8] = 0xFFFFFFFF;
		bogus[(3+i)%8] = 0xAAAAAAAA;
		bogus[(4+i)%8] = 0x33333333;
		bogus[(5+i)%8] = 0xCCCCCCCC;
		bogus[(6+i)%8] = 0xCCCCCCCC;
		bogus[(7+i)%8] = 0x33333333;
		wmb();
		asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
		mb();
		if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
			return PSURGE_DUAL;
	}
	return type;
}
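
/*
 * The loop above floods the bus with known bit patterns (pushed out with
 * dcbf) between repeated reads of the board ID register; if the register
 * isn't really there, the value read back tends to follow whatever was
 * last on the bus, and we fall back to treating the board as a plain dual.
 */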

static void __init psurge_quad_init(void)
{
	int procbits;

	if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
	procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
	if (psurge_type == PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	else
		PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
	mdelay(33);
	out_8(psurge_sec_intr, ~0);
	PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
	PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	if (psurge_type != PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
	PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
	PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
	mdelay(33);
	PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
}

static void __init smp_psurge_probe(void)
{
	int i, ncpus;
	struct device_node *dn;

	/*
	 * The powersurge cpu board can be used in the generation
	 * of powermacs that have a socket for an upgradeable cpu card,
	 * including the 7500, 8500, 9500, 9600.
	 * The device tree doesn't tell you if you have 2 cpus because
	 * OF doesn't know anything about the 2nd processor.
	 * Instead we look for magic bits in magic registers,
	 * in the hammerhead memory controller in the case of the
	 * dual-cpu powersurge board.  -- paulus.
	 */
	dn = of_find_node_by_name(NULL, "hammerhead");
	if (dn == NULL)
		return;
	of_node_put(dn);

	hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
	quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
	psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;

	psurge_type = psurge_quad_probe();
	if (psurge_type != PSURGE_DUAL) {
		psurge_quad_init();
		/* All released cards using this HW design have 4 CPUs */
		ncpus = 4;
		/* Not sure how timebase sync works on those, let's use SW */
		smp_ops->give_timebase = smp_generic_give_timebase;
		smp_ops->take_timebase = smp_generic_take_timebase;
	} else {
		iounmap(quad_base);
		if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
			/* not a dual-cpu card */
			iounmap(hhead_base);
			psurge_type = PSURGE_NONE;
			return;
		}
		ncpus = 2;
	}

	if (psurge_secondary_ipi_init())
		return;

	psurge_start = ioremap(PSURGE_START, 4);
	psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);

	/* This is necessary because OF doesn't know about the
	 * secondary cpu(s), and thus there aren't nodes in the
	 * device tree for them, and smp_setup_cpu_maps hasn't
	 * set their bits in cpu_present_mask.
	 */
	if (ncpus > NR_CPUS)
		ncpus = NR_CPUS;
	for (i = 1; i < ncpus; ++i)
		set_cpu_present(i, true);

	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
}

static int __init smp_psurge_kick_cpu(int nr)
{
	unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
	unsigned long a, flags;
	int i, j;

	/* Defining this here is evil ... but I prefer hiding that
	 * crap to avoid giving people ideas that they can do the
	 * same.
	 */
	extern volatile unsigned int cpu_callin_map[NR_CPUS];

	/* may need to flush here if secondary bats aren't setup */
	for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
		asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
	asm volatile("sync");

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);

	/* This is going to freeze the timebase, so we disable interrupts */
	local_irq_save(flags);

	out_be32(psurge_start, start);
	mb();

	psurge_set_ipi(nr);

	/*
	 * We can't use udelay here because the timebase is now frozen.
	 */
	for (i = 0; i < 2000; ++i)
		asm volatile("nop" : : : "memory");
	psurge_clr_ipi(nr);

	/*
	 * Also, because the timebase is frozen, we must not return to the
	 * caller, which would try to do udelays etc... Instead, we wait -here-
	 * for the CPU to call in.
	 */
	for (i = 0; i < 100000 && !cpu_callin_map[nr]; ++i) {
		for (j = 1; j < 10000; j++)
			asm volatile("nop" : : : "memory");
		asm volatile("sync" : : : "memory");
	}

	if (!cpu_callin_map[nr])
		goto stuck;

	/* And we do the TB sync here too for standard dual CPU cards */
	if (psurge_type == PSURGE_DUAL) {
		while(!tb_req)
			barrier();
		tb_req = 0;
		mb();
		timebase = get_tb();
		mb();
		while (timebase)
			barrier();
		mb();
	}
 stuck:
	/* now interrupt the secondary, restarting both TBs */
	if (psurge_type == PSURGE_DUAL)
		psurge_set_ipi(1);

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);

	return 0;
}
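
/*
 * Kick sequence in short: the secondary's entry point (physical address of
 * __secondary_start_pmac_0 plus an 8-byte slot per CPU) is written to
 * PSURGE_START, the target CPU is pulsed with an IPI, and we then busy-wait
 * with nops (the timebase is frozen, so udelay() is unusable) until the CPU
 * shows up in cpu_callin_map. For dual cards the timebase hand-off is also
 * done right here, before the final IPI restarts both timebases.
 */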

static void __init smp_psurge_setup_cpu(int cpu_nr)
{
	unsigned long flags = IRQF_PERCPU | IRQF_NO_THREAD;
	int irq;

	if (cpu_nr != 0 || !psurge_start)
		return;

	/* reset the entry point so if we get another intr we won't
	 * try to startup again */
	out_be32(psurge_start, 0x100);
	irq = irq_create_mapping(NULL, 30);
	if (request_irq(irq, psurge_ipi_intr, flags, "primary IPI", NULL))
		printk(KERN_ERR "Couldn't get primary IPI interrupt");
}

void __init smp_psurge_take_timebase(void)
{
	if (psurge_type != PSURGE_DUAL)
		return;

	tb_req = 1;
	mb();
	while (!timebase)
		barrier();
	mb();
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	mb();
	set_dec(tb_ticks_per_jiffy/2);
}

void __init smp_psurge_give_timebase(void)
{
	/* Nothing to do here */
}

/* PowerSurge-style Macs */
struct smp_ops_t psurge_smp_ops = {
	.message_pass	= NULL,	/* Use smp_muxed_ipi_message_pass */
	.cause_ipi	= smp_psurge_cause_ipi,
	.cause_nmi_ipi	= NULL,
	.probe		= smp_psurge_probe,
	.kick_cpu	= smp_psurge_kick_cpu,
	.setup_cpu	= smp_psurge_setup_cpu,
	.give_timebase	= smp_psurge_give_timebase,
	.take_timebase	= smp_psurge_take_timebase,
};
#endif /* CONFIG_PPC_PMAC32_PSURGE */

/*
 * Core 99 and later support
 */

static void smp_core99_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	while(!tb_req)
		barrier();
	tb_req = 0;
	(*pmac_tb_freeze)(1);
	mb();
	timebase = get_tb();
	mb();
	while (timebase)
		barrier();
	mb();
	(*pmac_tb_freeze)(0);
	mb();

	local_irq_restore(flags);
}

static void smp_core99_take_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	tb_req = 1;
	mb();
	while (!timebase)
		barrier();
	mb();
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	mb();

	local_irq_restore(flags);
}
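
/*
 * The timebase hand-off between the two functions above is a simple
 * two-variable handshake: the secondary sets tb_req and spins on timebase;
 * the primary waits for tb_req, freezes both timebases via pmac_tb_freeze(),
 * publishes its own TB value in timebase, and spins until the secondary has
 * copied it with set_tb() and cleared timebase again, at which point the
 * primary unfreezes the timebases. The mb() calls order the flag and data
 * updates as seen by the two CPUs.
 */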

#ifdef CONFIG_PPC64
/*
 * G5s enable/disable the timebase via an i2c-connected clock chip.
 */
static struct pmac_i2c_bus *pmac_tb_clock_chip_host;
static u8 pmac_tb_pulsar_addr;

static void smp_core99_cypress_tb_freeze(int freeze)
{
	u8 data;
	int rc;

	/* Strangely, the device-tree says address is 0xd2, but darwin
	 * accesses 0xd0 ...
	 */
	pmac_i2c_setmode(pmac_tb_clock_chip_host,
			 pmac_i2c_mode_combined);
	rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
			   0xd0 | pmac_i2c_read,
			   1, 0x81, &data, 1);
	if (rc != 0)
		goto bail;

	data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);

	pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
	rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
			   0xd0 | pmac_i2c_write,
			   1, 0x81, &data, 1);

 bail:
	if (rc != 0) {
		printk("Cypress Timebase %s rc: %d\n",
		       freeze ? "freeze" : "unfreeze", rc);
		panic("Timebase freeze failed !\n");
	}
}

static void smp_core99_pulsar_tb_freeze(int freeze)
{
	u8 data;
	int rc;

	pmac_i2c_setmode(pmac_tb_clock_chip_host,
			 pmac_i2c_mode_combined);
	rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
			   pmac_tb_pulsar_addr | pmac_i2c_read,
			   1, 0x2e, &data, 1);
	if (rc != 0)
		goto bail;

	data = (data & 0x88) | (freeze ? 0x11 : 0x22);

	pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
	rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
			   pmac_tb_pulsar_addr | pmac_i2c_write,
			   1, 0x2e, &data, 1);

 bail:
	if (rc != 0) {
		printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
		       freeze ? "freeze" : "unfreeze", rc);
		panic("Timebase freeze failed !\n");
	}
}
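
/*
 * Both i2c freeze helpers follow the same pattern: a combined-mode read of
 * the clock chip's control register, a bit twiddle to start or stop the
 * timebase clocks, and a stdsub-mode write back. A failure here is treated
 * as fatal (panic), as timebase sync cannot proceed without it.
 */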

static void __init smp_core99_setup_i2c_hwsync(int ncpus)
{
	struct device_node *cc = NULL;
	struct device_node *p;
	const char *name = NULL;
	const u32 *reg;
	int ok;

	/* Look for the clock chip */
	for_each_node_by_name(cc, "i2c-hwclock") {
		p = of_get_parent(cc);
		ok = p && of_device_is_compatible(p, "uni-n-i2c");
		of_node_put(p);
		if (!ok)
			continue;

		pmac_tb_clock_chip_host = pmac_i2c_find_bus(cc);
		if (pmac_tb_clock_chip_host == NULL)
			continue;
		reg = of_get_property(cc, "reg", NULL);
		if (reg == NULL)
			continue;
		switch (*reg) {
		case 0xd2:
			if (of_device_is_compatible(cc, "pulsar-legacy-slewing")) {
				pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
				pmac_tb_pulsar_addr = 0xd2;
				name = "Pulsar";
			} else if (of_device_is_compatible(cc, "cy28508")) {
				pmac_tb_freeze = smp_core99_cypress_tb_freeze;
				name = "Cypress";
			}
			break;
		case 0xd4:
			pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
			pmac_tb_pulsar_addr = 0xd4;
			name = "Pulsar";
			break;
		}
		if (pmac_tb_freeze != NULL)
			break;
	}
	if (pmac_tb_freeze != NULL) {
		/* Open i2c bus for synchronous access */
		if (pmac_i2c_open(pmac_tb_clock_chip_host, 1)) {
			printk(KERN_ERR "Failed to open i2c bus for clock"
			       " sync, falling back to software sync !\n");
			goto no_i2c_sync;
		}
		printk(KERN_INFO "Processor timebase sync using %s i2c clock\n",
		       name);
		return;
	}
 no_i2c_sync:
	pmac_tb_freeze = NULL;
	pmac_tb_clock_chip_host = NULL;
}

/*
 * Newer G5s use a platform function
 */
static void smp_core99_pfunc_tb_freeze(int freeze)
{
	struct device_node *cpus;
	struct pmf_args args;

	cpus = of_find_node_by_path("/cpus");
	BUG_ON(cpus == NULL);
	args.count = 1;
	args.u[0].v = !freeze;
	pmf_call_function(cpus, "cpu-timebase", &args);
	of_node_put(cpus);
}

#else /* CONFIG_PPC64 */

/*
 * SMP G4s use a GPIO to enable/disable the timebase.
 */

static unsigned int core99_tb_gpio;	/* Timebase freeze GPIO */

static void smp_core99_gpio_tb_freeze(int freeze)
{
	if (freeze)
		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
	else
		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
}
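
/*
 * The GPIO variant writes 4 to assert the freeze and 0 to release it; the
 * trailing READ_GPIO call reads the pin back, presumably to make sure the
 * write has actually reached the hardware before we continue.
 */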

#endif /* !CONFIG_PPC64 */

static void core99_init_caches(int cpu)
{
#ifndef CONFIG_PPC64
	/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
	static long int core99_l2_cache;
	static long int core99_l3_cache;

	if (!cpu_has_feature(CPU_FTR_L2CR))
		return;

	if (cpu == 0) {
		core99_l2_cache = _get_L2CR();
		printk("CPU0: L2CR is %lx\n", core99_l2_cache);
	} else {
		printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
		_set_L2CR(0);
		_set_L2CR(core99_l2_cache);
		printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
	}

	if (!cpu_has_feature(CPU_FTR_L3CR))
		return;

	if (cpu == 0) {
		core99_l3_cache = _get_L3CR();
		printk("CPU0: L3CR is %lx\n", core99_l3_cache);
	} else {
		printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
		_set_L3CR(0);
		_set_L3CR(core99_l3_cache);
		printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
	}
#endif /* !CONFIG_PPC64 */
}
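
/*
 * In short: when called on CPU0 this just records the boot CPU's L2CR/L3CR
 * values; when called on a secondary it clears the control register and
 * then copies CPU0's value in, so all G4 CPUs end up running with identical
 * L2/L3 cache settings. On ppc64 the function is a no-op.
 */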

static void __init smp_core99_setup(int ncpus)
{
#ifdef CONFIG_PPC64
	/* i2c based HW sync on some G5s */
	if (of_machine_is_compatible("PowerMac7,2") ||
	    of_machine_is_compatible("PowerMac7,3") ||
	    of_machine_is_compatible("RackMac3,1"))
		smp_core99_setup_i2c_hwsync(ncpus);

	/* pfunc based HW sync on recent G5s */
	if (pmac_tb_freeze == NULL) {
		struct device_node *cpus =
			of_find_node_by_path("/cpus");
		if (cpus &&
		    of_get_property(cpus, "platform-cpu-timebase", NULL)) {
			pmac_tb_freeze = smp_core99_pfunc_tb_freeze;
			printk(KERN_INFO "Processor timebase sync using"
			       " platform function\n");
		}
		of_node_put(cpus);
	}

#else /* CONFIG_PPC64 */

	/* GPIO based HW sync on ppc32 Core99 */
	if (pmac_tb_freeze == NULL && !of_machine_is_compatible("MacRISC4")) {
		struct device_node *cpu;
		const u32 *tbprop = NULL;

		core99_tb_gpio = KL_GPIO_TB_ENABLE;	/* default value */
		cpu = of_find_node_by_type(NULL, "cpu");
		if (cpu != NULL) {
			tbprop = of_get_property(cpu, "timebase-enable", NULL);
			if (tbprop)
				core99_tb_gpio = *tbprop;
			of_node_put(cpu);
		}
		pmac_tb_freeze = smp_core99_gpio_tb_freeze;
		printk(KERN_INFO "Processor timebase sync using"
		       " GPIO 0x%02x\n", core99_tb_gpio);
	}

#endif /* CONFIG_PPC64 */

	/* No timebase sync, fall back to software */
	if (pmac_tb_freeze == NULL) {
		smp_ops->give_timebase = smp_generic_give_timebase;
		smp_ops->take_timebase = smp_generic_take_timebase;
		printk(KERN_INFO "Processor timebase sync using software\n");
	}

#ifndef CONFIG_PPC64
	{
		int i;

		/* XXX should get this from reg properties */
		for (i = 1; i < ncpus; ++i)
			set_hard_smp_processor_id(i, i);
	}
#endif

	/* 32-bit SMP can't NAP */
	if (!of_machine_is_compatible("MacRISC4"))
		powersave_nap = 0;
}

static void __init smp_core99_probe(void)
{
	struct device_node *cpus;
	int ncpus = 0;

	if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);

	/* Count CPUs in the device-tree */
	for_each_node_by_type(cpus, "cpu")
		++ncpus;

	printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);

	/* Nothing more to do if less than 2 of them */
	if (ncpus <= 1)
		return;

	/* We need to perform some early initialisations before we can start
	 * setting up SMP as we are running before initcalls
	 */
	pmac_pfunc_base_install();
	pmac_i2c_init();

	/* Setup various bits like timebase sync method, ability to nap, ... */
	smp_core99_setup(ncpus);

	/* Install IPIs */
	mpic_request_ipis();

	/* Collect l2cr and l3cr values from CPU 0 */
	core99_init_caches(0);
}

static int smp_core99_kick_cpu(int nr)
{
	unsigned int save_vector;
	unsigned long target, flags;
	unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100);

	if (nr < 0 || nr > 3)
		return -ENOENT;

	if (ppc_md.progress)
		ppc_md.progress("smp_core99_kick_cpu", 0x346);

	local_irq_save(flags);

	/* Save reset vector */
	save_vector = *vector;

	/* Setup fake reset vector that does
	 *   b __secondary_start_pmac_0 + nr*8
	 */
	target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
	patch_branch(vector, target, BRANCH_SET_LINK);

	/* Put some life in our friend */
	pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);

	/* FIXME: We wait a bit for the CPU to take the exception, I should
	 * instead wait for the entry code to set something for me. Well,
	 * ideally, all that crap will be done in prom.c and the CPU left
	 * in a RAM-based wait loop like CHRP.
	 */
	mdelay(1);

	/* Restore our exception vector */
	patch_instruction(vector, ppc_inst(save_vector));

	local_irq_restore(flags);

	if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);

	return 0;
}
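
/*
 * The kick works by temporarily rewriting the system reset vector at 0x100:
 * the original word is saved, a branch to the secondary entry stub
 * (__secondary_start_pmac_0 plus an 8-byte per-CPU slot) is patched in, the
 * target CPU is reset through the platform feature call, and after a short
 * delay the original vector word is restored.
 */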

static void smp_core99_setup_cpu(int cpu_nr)
{
	/* Setup L2/L3 */
	if (cpu_nr != 0)
		core99_init_caches(cpu_nr);

	/* Setup openpic */
	mpic_setup_this_cpu();
}

#ifdef CONFIG_PPC64
#ifdef CONFIG_HOTPLUG_CPU
static unsigned int smp_core99_host_open;

static int smp_core99_cpu_prepare(unsigned int cpu)
{
	int rc;

	/* Open i2c bus if it was used for tb sync */
	if (pmac_tb_clock_chip_host && !smp_core99_host_open) {
		rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
		if (rc) {
			pr_err("Failed to open i2c bus for time sync\n");
			return notifier_from_errno(rc);
		}
		smp_core99_host_open = 1;
	}
	return 0;
}

static int smp_core99_cpu_online(unsigned int cpu)
{
	/* Close i2c bus if it was used for tb sync */
	if (pmac_tb_clock_chip_host && smp_core99_host_open) {
		pmac_i2c_close(pmac_tb_clock_chip_host);
		smp_core99_host_open = 0;
	}
	return 0;
}
#endif /* CONFIG_HOTPLUG_CPU */

static void __init smp_core99_bringup_done(void)
{
	/* Close i2c bus if it was used for tb sync */
	if (pmac_tb_clock_chip_host)
		pmac_i2c_close(pmac_tb_clock_chip_host);

	/* If we didn't start the second CPU, we must take
	 * it off the bus.
	 */
	if (of_machine_is_compatible("MacRISC4") &&
	    num_online_cpus() < 2) {
		set_cpu_present(1, false);
		g5_phy_disable_cpu1();
	}
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_POWERPC_PMAC_PREPARE,
				  "powerpc/pmac:prepare", smp_core99_cpu_prepare,
				  NULL);
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "powerpc/pmac:online",
				  smp_core99_cpu_online, NULL);
#endif

	if (ppc_md.progress)
		ppc_md.progress("smp_core99_bringup_done", 0x349);
}
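
/*
 * Once bring-up is complete the i2c bus used for timebase sync is closed;
 * the hotplug callbacks above reopen it for each later CPU online and close
 * it again afterwards. On G5s where the second CPU never came up, that CPU
 * is marked not-present and taken off the bus via g5_phy_disable_cpu1().
 */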

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_HOTPLUG_CPU

static int smp_core99_cpu_disable(void)
{
	int rc = generic_cpu_disable();
	if (rc)
		return rc;

	mpic_cpu_set_priority(0xf);

	cleanup_cpu_mmu_context();

	return 0;
}

#ifdef CONFIG_PPC32

static void pmac_cpu_offline_self(void)
{
	int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();
	pr_debug("CPU%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	smp_wmb();
	mb();
	low_cpu_offline_self();
}

#else /* CONFIG_PPC32 */

static void pmac_cpu_offline_self(void)
{
	int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * turn off as much as possible, we'll be
	 * kicked out as this will only be invoked
	 * on core99 platforms for now ...
	 */

	printk(KERN_INFO "CPU#%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	smp_wmb();

	/*
	 * Re-enable interrupts. The NAP code needs to enable them
	 * anyway, so do it now and deal with the case where one already
	 * happened while soft-disabled.
	 * We shouldn't get any external interrupts, only decrementer, and the
	 * decrementer handler is safe for use on offline CPUs.
	 */
	local_irq_enable();

	while (1) {
		/* let's not take timer interrupts too often ... */
		set_dec(0x7fffffff);

		/* Enter NAP mode */
		power4_idle();
	}
}

#endif /* else CONFIG_PPC32 */
#endif /* CONFIG_HOTPLUG_CPU */

/* Core99 Macs (dual G4s and G5s) */
static struct smp_ops_t core99_smp_ops = {
	.message_pass	= smp_mpic_message_pass,
	.probe		= smp_core99_probe,
#ifdef CONFIG_PPC64
	.bringup_done	= smp_core99_bringup_done,
#endif
	.kick_cpu	= smp_core99_kick_cpu,
	.setup_cpu	= smp_core99_setup_cpu,
	.give_timebase	= smp_core99_give_timebase,
	.take_timebase	= smp_core99_take_timebase,
#if defined(CONFIG_HOTPLUG_CPU)
	.cpu_disable	= smp_core99_cpu_disable,
	.cpu_die	= generic_cpu_die,
#endif
};

void __init pmac_setup_smp(void)
{
	struct device_node *np;

	/* Check for Core99 */
	np = of_find_node_by_name(NULL, "uni-n");
	if (!np)
		np = of_find_node_by_name(NULL, "u3");
	if (!np)
		np = of_find_node_by_name(NULL, "u4");
	if (np) {
		of_node_put(np);
		smp_ops = &core99_smp_ops;
	}
#ifdef CONFIG_PPC_PMAC32_PSURGE
	else {
		/* We have to set bits in cpu_possible_mask here since the
		 * secondary CPU(s) aren't in the device tree. Various
		 * things won't be initialized for CPUs not in the possible
		 * map, so we really need to fix it up here.
		 */
		int cpu;

		for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
			set_cpu_possible(cpu, true);
		smp_ops = &psurge_smp_ops;
	}
#endif /* CONFIG_PPC_PMAC32_PSURGE */

#ifdef CONFIG_HOTPLUG_CPU
	smp_ops->cpu_offline_self = pmac_cpu_offline_self;
#endif
}