pervasive.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CBE Pervasive Monitor and Debug
 *
 * (C) Copyright IBM Corporation 2005
 *
 * Authors: Maximino Aguilar ([email protected])
 *          Michael N. Day ([email protected])
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/pgtable.h>

#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/reg.h>
#include <asm/cell-regs.h>
#include <asm/cpu_has_feature.h>

#include "pervasive.h"
#include "ras.h"

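/*
 * Idle callback for the Cell BE: arm decrementer and external-interrupt
 * wakeups in the thread switch control register, drop to low thread
 * priority and clear CTRL[TE] so the hardware thread stops executing.
 * With Pause(0) control enabled in the PMD (see cbe_pervasive_init()),
 * this enters the low-power state; wakeup arrives as a system reset
 * exception handled by cbe_system_reset_exception() below.
 */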
static void cbe_power_save(void)
{
	unsigned long ctrl, thread_switch_control;

	/* Ensure our interrupt state is properly tracked */
	if (!prep_irq_for_idle())
		return;

	ctrl = mfspr(SPRN_CTRLF);

	/* Enable DEC and EE interrupt request */
	thread_switch_control  = mfspr(SPRN_TSC_CELL);
	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;

	switch (ctrl & CTRL_CT) {
	case CTRL_CT0:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
		break;
	case CTRL_CT1:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
		break;
	default:
		printk(KERN_WARNING "%s: unknown configuration\n",
			__func__);
		break;
	}
	mtspr(SPRN_TSC_CELL, thread_switch_control);

	/*
	 * go into low thread priority, medium priority will be
	 * restored for us after wake-up.
	 */
	HMT_low();

	/*
	 * atomically disable thread execution and runlatch.
	 * External and Decrementer exceptions are still handled when the
	 * thread is disabled but now enter in cbe_system_reset_exception()
	 */
	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
	mtspr(SPRN_CTRLT, ctrl);

	/* Re-enable interrupts in MSR */
	__hard_irq_enable();
}

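/*
 * System reset handler installed alongside cbe_power_save(): a wakeup
 * from the paused thread is delivered as a system reset, with the wake
 * reason encoded in SRR1.  Decode it and either handle the event or
 * return 0 to fall through to a real system reset.
 */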
static int cbe_system_reset_exception(struct pt_regs *regs)
{
	switch (regs->msr & SRR1_WAKEMASK) {
	case SRR1_WAKEDEC:
		set_dec(1);
		break;
	case SRR1_WAKEEE:
		/*
		 * Handle these when interrupts get re-enabled and we take
		 * them as regular exceptions. We are in an NMI context
		 * and can't handle these here.
		 */
		break;
	case SRR1_WAKEMT:
		return cbe_sysreset_hack();
#ifdef CONFIG_CBE_RAS
	case SRR1_WAKESYSERR:
		cbe_system_error_exception(regs);
		break;
	case SRR1_WAKETHERM:
		cbe_thermal_exception(regs);
		break;
#endif /* CONFIG_CBE_RAS */
	default:
		/* do system reset */
		return 0;
	}
	/* everything handled */
	return 1;
}

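/*
 * Enable the Pause(0) control bit in each CPU's PMD register and hook
 * the platform power_save and system_reset_exception callbacks so the
 * power-save path above is used on Cell.
 */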
void __init cbe_pervasive_init(void)
{
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
		return;

	for_each_possible_cpu(cpu) {
		struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
		if (!regs)
			continue;

		/* Enable Pause(0) control bit */
		out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
				      CBE_PMD_PAUSE_ZERO_CONTROL);
	}

	ppc_md.power_save = cbe_power_save;
	ppc_md.system_reset_exception = cbe_system_reset_exception;
}