/* arch/mips/sgi-ip27/ip27-nmi.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/kernel.h>
  3. #include <linux/mmzone.h>
  4. #include <linux/nodemask.h>
  5. #include <linux/spinlock.h>
  6. #include <linux/smp.h>
  7. #include <linux/atomic.h>
  8. #include <asm/sn/types.h>
  9. #include <asm/sn/addrs.h>
  10. #include <asm/sn/nmi.h>
  11. #include <asm/sn/arch.h>
  12. #include <asm/sn/agent.h>
/*
 * Number of cpu slices to scan per node.  The accurate per-node count
 * (CNODE_NUM_CPUS) is disabled; we conservatively assume fully
 * populated nodes instead.
 */
#if 0
#define NODE_NUM_CPUS(n)	CNODE_NUM_CPUS(n)
#else
#define NODE_NUM_CPUS(n)	CPUS_PER_NODE
#endif

/* Raise an NMI on one cpu slice of a (possibly remote) hub by writing
 * its per-slice PI_NMI register. */
#define SEND_NMI(_nasid, _slice) \
	REMOTE_HUB_S((_nasid), (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)

typedef unsigned long machreg_t;	/* width of a machine register */

/* Serializes cpus entering cont_nmi_dump(); first one in does the dump. */
static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
  22. /*
  23. * Let's see what else we need to do here. Set up sp, gp?
  24. */
  25. void nmi_dump(void)
  26. {
  27. void cont_nmi_dump(void);
  28. cont_nmi_dump();
  29. }
  30. void install_cpu_nmi_handler(int slice)
  31. {
  32. nmi_t *nmi_addr;
  33. nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
  34. if (nmi_addr->call_addr)
  35. return;
  36. nmi_addr->magic = NMI_MAGIC;
  37. nmi_addr->call_addr = (void *)nmi_dump;
  38. nmi_addr->call_addr_c =
  39. (void *)(~((unsigned long)(nmi_addr->call_addr)));
  40. nmi_addr->call_parm = 0;
  41. }
/*
 * Copy the cpu registers which have been saved in the IP27prom format
 * into the eframe format for the node under consideration.
 */
void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
	struct reg_struct *nr;
	int i;

	/* Get the pointer to the current cpu's register set. */
	nr = (struct reg_struct *)
		(TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
		slice * IP27_NMI_KREGS_CPU_SIZE);

	pr_emerg("NMI nasid %d: slice %d\n", nasid, slice);

	/*
	 * Saved main processor registers, four per output line.
	 * The increment sits between the two modulo tests so that a row
	 * header is printed before gpr[i] and the newline after it.
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			pr_emerg("$%2d :", i);
		pr_cont(" %016lx", nr->gpr[i]);
		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

	/* The PROM save area has no hi/lo slots. */
	pr_emerg("Hi : (value lost)\n");
	pr_emerg("Lo : (value lost)\n");

	/*
	 * Saved cp0 registers
	 */
	pr_emerg("epc : %016lx %pS\n", nr->epc, (void *)nr->epc);
	pr_emerg("%s\n", print_tainted());
	pr_emerg("ErrEPC: %016lx %pS\n", nr->error_epc, (void *)nr->error_epc);
	pr_emerg("ra : %016lx %pS\n", nr->gpr[31], (void *)nr->gpr[31]);

	/* Decode the Status register: address-mode bits, privilege
	 * level, then exception/interrupt state. */
	pr_emerg("Status: %08lx ", nr->sr);

	if (nr->sr & ST0_KX)
		pr_cont("KX ");
	if (nr->sr & ST0_SX)
		pr_cont("SX ");
	if (nr->sr & ST0_UX)
		pr_cont("UX ");

	switch (nr->sr & ST0_KSU) {
	case KSU_USER:
		pr_cont("USER ");
		break;
	case KSU_SUPERVISOR:
		pr_cont("SUPERVISOR ");
		break;
	case KSU_KERNEL:
		pr_cont("KERNEL ");
		break;
	default:
		pr_cont("BAD_MODE ");
		break;
	}

	if (nr->sr & ST0_ERL)
		pr_cont("ERL ");
	if (nr->sr & ST0_EXL)
		pr_cont("EXL ");
	if (nr->sr & ST0_IE)
		pr_cont("IE ");
	pr_cont("\n");

	pr_emerg("Cause : %08lx\n", nr->cause);
	pr_emerg("PrId : %08x\n", read_c0_prid());
	pr_emerg("BadVA : %016lx\n", nr->badva);
	pr_emerg("CErr : %016lx\n", nr->cache_err);
	pr_emerg("NMI_SR: %016lx\n", nr->nmi_sr);

	pr_emerg("\n");
}
  110. void nmi_dump_hub_irq(nasid_t nasid, int slice)
  111. {
  112. u64 mask0, mask1, pend0, pend1;
  113. if (slice == 0) { /* Slice A */
  114. mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
  115. mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
  116. } else { /* Slice B */
  117. mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
  118. mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
  119. }
  120. pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
  121. pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);
  122. pr_emerg("PI_INT_MASK0: %16llx PI_INT_MASK1: %16llx\n", mask0, mask1);
  123. pr_emerg("PI_INT_PEND0: %16llx PI_INT_PEND1: %16llx\n", pend0, pend1);
  124. pr_emerg("\n\n");
  125. }
  126. /*
  127. * Copy the cpu registers which have been saved in the IP27prom format
  128. * into the eframe format for the node under consideration.
  129. */
  130. void nmi_node_eframe_save(nasid_t nasid)
  131. {
  132. int slice;
  133. if (nasid == INVALID_NASID)
  134. return;
  135. /* Save the registers into eframe for each cpu */
  136. for (slice = 0; slice < NODE_NUM_CPUS(slice); slice++) {
  137. nmi_cpu_eframe_save(nasid, slice);
  138. nmi_dump_hub_irq(nasid, slice);
  139. }
  140. }
  141. /*
  142. * Save the nmi cpu registers for all cpus in the system.
  143. */
  144. void
  145. nmi_eframes_save(void)
  146. {
  147. nasid_t nasid;
  148. for_each_online_node(nasid)
  149. nmi_node_eframe_save(nasid);
  150. }
  151. void
  152. cont_nmi_dump(void)
  153. {
  154. #ifndef REAL_NMI_SIGNAL
  155. static atomic_t nmied_cpus = ATOMIC_INIT(0);
  156. atomic_inc(&nmied_cpus);
  157. #endif
  158. /*
  159. * Only allow 1 cpu to proceed
  160. */
  161. arch_spin_lock(&nmi_lock);
  162. #ifdef REAL_NMI_SIGNAL
  163. /*
  164. * Wait up to 15 seconds for the other cpus to respond to the NMI.
  165. * If a cpu has not responded after 10 sec, send it 1 additional NMI.
  166. * This is for 2 reasons:
  167. * - sometimes a MMSC fail to NMI all cpus.
  168. * - on 512p SN0 system, the MMSC will only send NMIs to
  169. * half the cpus. Unfortunately, we don't know which cpus may be
  170. * NMIed - it depends on how the site chooses to configure.
  171. *
  172. * Note: it has been measure that it takes the MMSC up to 2.3 secs to
  173. * send NMIs to all cpus on a 256p system.
  174. */
  175. for (i=0; i < 1500; i++) {
  176. for_each_online_node(node)
  177. if (NODEPDA(node)->dump_count == 0)
  178. break;
  179. if (node == MAX_NUMNODES)
  180. break;
  181. if (i == 1000) {
  182. for_each_online_node(node)
  183. if (NODEPDA(node)->dump_count == 0) {
  184. cpu = cpumask_first(cpumask_of_node(node));
  185. for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
  186. CPUMASK_SETB(nmied_cpus, cpu);
  187. /*
  188. * cputonasid, cputoslice
  189. * needs kernel cpuid
  190. */
  191. SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
  192. }
  193. }
  194. }
  195. udelay(10000);
  196. }
  197. #else
  198. while (atomic_read(&nmied_cpus) != num_online_cpus());
  199. #endif
  200. /*
  201. * Save the nmi cpu registers for all cpu in the eframe format.
  202. */
  203. nmi_eframes_save();
  204. LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
  205. }