smp-cps.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <[email protected]>
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irq.h>

#include <asm/bcache.h>
#include <asm/mips-cps.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

static bool threads_disabled;
static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;
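
/*
 * Handle the "nothreads" kernel command line argument, which limits each
 * core to booting a single VPE.
 */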
static int __init setup_nothreads(char *s)
{
	threads_disabled = true;
	return 0;
}
early_param("nothreads", setup_nothreads);
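
/*
 * Return the number of VPEs to bring up within the given core: 1 when
 * "nothreads" was given, the hardware VPE count otherwise.
 */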
static unsigned core_vpe_count(unsigned int cluster, unsigned core)
{
	if (threads_disabled)
		return 1;

	return mips_cps_numvps(cluster, core);
}
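
/*
 * Detect the topology of the system, record it in cpu_data and mark the
 * VPEs of cluster 0 as possible & present CPUs. Also performs one-time
 * setup of core 0, upon which we're running.
 */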
static void __init cps_smp_setup(void)
{
	unsigned int nclusters, ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int cl, c, v;

	/* Detect & record VPE topology */
	nvpes = 0;
	nclusters = mips_cps_numclusters();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (cl = 0; cl < nclusters; cl++) {
		if (cl > 0)
			pr_cont(",");
		pr_cont("{");

		ncores = mips_cps_numcores(cl);
		for (c = 0; c < ncores; c++) {
			core_vpes = core_vpe_count(cl, c);

			if (c > 0)
				pr_cont(",");
			pr_cont("%u", core_vpes);

			/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
			if (!cl && !c)
				smp_num_siblings = core_vpes;

			for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
				cpu_set_cluster(&cpu_data[nvpes + v], cl);
				cpu_set_core(&cpu_data[nvpes + v], c);
				cpu_set_vpe_id(&cpu_data[nvpes + v], v);
			}

			nvpes += core_vpes;
		}

		pr_cont("}");
	}
	pr_cont(" total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
		set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}
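
/*
 * Prepare to boot secondary CPUs: determine whether the cache
 * configuration permits multi-core SMP, patch the boot vector with the
 * kseg0 CCA, and allocate the per-core & per-VPE boot configuration
 * structures used to hand state to newly started CPUs.
 */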
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable, cores_limited;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	cores_limited = false;
	if (cca_unsuitable || cpu_has_dc_aliases) {
		for_each_present_cpu(c) {
			if (cpus_are_siblings(smp_processor_id(), c))
				continue;

			set_cpu_present(c, false);
			cores_limited = true;
		}
	}
	if (cores_limited)
		pr_warn("Using only one core due to %s%s%s\n",
			cca_unsuitable ? "unsuitable CCA" : "",
			(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
			cpu_has_dc_aliases ? "dcache aliasing" : "");

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();

	/* Allocate core boot configuration structs */
	ncores = mips_cps_numcores(0);
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(0, c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}
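
/*
 * Power up & take out of reset the core containing the given VPE, using
 * the CPC where one is present & falling back to the GCR reset release
 * register otherwise.
 */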
static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	set_gcr_access(1 << core);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
			seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}
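
/*
 * SMP call function callback: boot the pending VPEs of the core upon
 * which the caller is running.
 */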
static void remote_vpe_boot(void *dummy)
{
	unsigned core = cpu_core(&current_cpu_data);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}
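
/*
 * Boot the secondary CPU (ie. VPE) numbered cpu, which will begin life
 * running the given idle task. The target VPE may lie in a powered-down
 * core, a powered-up core other than our own, or our own core, each of
 * which is handled differently below.
 */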
static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;

	/* We don't yet support booting CPUs in other clusters */
	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
		return -ENOSYS;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}

	if (!cpus_are_siblings(cpu, smp_processor_id())) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (!cpus_are_siblings(cpu, remote))
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
	return 0;
}
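
/* Per-CPU initialisation run on each newly started CPU */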
static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned int ident = read_gic_vl_ident();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}
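
/*
 * Final setup run by each newly started CPU before it enters the idle
 * loop: arm the first timer interrupt & enable interrupts.
 */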
static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)

enum cpu_death {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
};
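
/*
 * Take the calling CPU out of service, either by halting its VPE or by
 * gating power to its whole core.
 */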
static void cps_shutdown_this_cpu(enum cpu_death death)
{
	unsigned int cpu, core, vpe_id;

	cpu = smp_processor_id();
	core = cpu_core(&cpu_data[cpu]);

	if (death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		pr_debug("Gating power to core %d\n", core);
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}
}

#ifdef CONFIG_KEXEC
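/* Stop a non-boot CPU ahead of kexec, halting it where the hardware allows */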
static void cps_kexec_nonboot_cpu(void)
{
	if (cpu_has_mipsmt || cpu_has_vp)
		cps_shutdown_this_cpu(CPU_DEATH_HALT);
	else
		cps_shutdown_this_cpu(CPU_DEATH_POWER);
}
#endif /* CONFIG_KEXEC */

#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */

#ifdef CONFIG_HOTPLUG_CPU
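/*
 * Begin taking the calling CPU offline: fail if the platform cannot power
 * gate cores, otherwise remove this CPU from the online & foreign CPU
 * maps and migrate its interrupts elsewhere.
 */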
static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	irq_migrate_all_off_this_cpu();

	return 0;
}

static unsigned cpu_death_sibling;
static enum cpu_death cpu_death;
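
/*
 * Called on the dying CPU itself: halt this VPE if a sibling within the
 * core remains online, otherwise power down the whole core.
 */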
void play_dead(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (!cpus_are_siblings(cpu, cpu_death_sibling))
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	(void)cpu_report_death();

	cps_shutdown_this_cpu(cpu_death);

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}
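
/*
 * SMP call function callback: spin until the TC of the dying sibling CPU
 * identified by ptr_cpu has halted.
 */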
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}
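
/*
 * Called on a surviving CPU: wait until the dying CPU has genuinely gone,
 * ie. its core has powered down or its VPE/TC has halted, so that a later
 * online cannot race with the offlining.
 */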
static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	ktime_t fail_time;
	unsigned stat;
	int err;

	/* Wait for the cpu to choose its way out */
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}

	/*
	 * Now wait for the CPU to actually offline. Without doing this, the
	 * offlining may race with one or more of:
	 *
	 * - Onlining the CPU again.
	 * - Powering down the core if another VPE within it is offlined.
	 * - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		fail_time = ktime_add_ms(ktime_get(), 2000);
		do {
			mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE;
			stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
			mips_cpc_unlock_other();
			mips_cm_unlock_other();

			if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
				break;

			/*
			 * The core ought to have powered down, but didn't &
			 * now we don't really know what state it's in. It's
			 * likely that its _pwr_up pin has been wired to logic
			 * 1 & it powered back up as soon as we powered it
			 * down...
			 *
			 * The best we can do is warn the user & continue in
			 * the hope that the core is doing nothing harmful &
			 * might behave properly if we online it later.
			 */
			if (WARN(ktime_after(ktime_get(), fail_time),
				 "CPU%u hasn't powered down, seq. state %u\n",
				 cpu, stat))
				break;
		} while (1);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPU's registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static const struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.kexec_nonboot_cpu	= cps_kexec_nonboot_cpu,
#endif
};
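
/* Report whether the CPS SMP implementation is the one currently in use */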
bool mips_cps_smp_in_use(void)
{
	extern const struct plat_smp_ops *mp_ops;

	return mp_ops == &cps_smp_ops;
}
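
/*
 * Register the CPS SMP ops, provided the hardware we depend upon (a CM,
 * and a GIC for IPIs) is present.
 */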
int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);

	return 0;
}