opal.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * PowerNV OPAL high level interfaces
  4. *
  5. * Copyright 2011 IBM Corp.
  6. */
  7. #define pr_fmt(fmt) "opal: " fmt
  8. #include <linux/printk.h>
  9. #include <linux/types.h>
  10. #include <linux/of.h>
  11. #include <linux/of_fdt.h>
  12. #include <linux/of_platform.h>
  13. #include <linux/of_address.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/notifier.h>
  16. #include <linux/slab.h>
  17. #include <linux/sched.h>
  18. #include <linux/kobject.h>
  19. #include <linux/delay.h>
  20. #include <linux/memblock.h>
  21. #include <linux/kthread.h>
  22. #include <linux/freezer.h>
  23. #include <linux/kmsg_dump.h>
  24. #include <linux/console.h>
  25. #include <linux/sched/debug.h>
  26. #include <asm/machdep.h>
  27. #include <asm/opal.h>
  28. #include <asm/firmware.h>
  29. #include <asm/mce.h>
  30. #include <asm/imc-pmu.h>
  31. #include <asm/bug.h>
  32. #include "powernv.h"
/* Max messages queued for replay before any notifier registers. */
#define OPAL_MSG_QUEUE_MAX 16

/* One OPAL message held back until a subscriber appears for its type. */
struct opal_msg_node {
	struct list_head	list;
	struct opal_msg		msg;
};

/* Protects msg_list / msg_list_size and orders replay vs. registration. */
static DEFINE_SPINLOCK(msg_list_lock);
static LIST_HEAD(msg_list);

/* /sys/firmware/opal */
struct kobject *opal_kobj;

/* OPAL firmware image location/size, read from the flat device tree. */
struct opal {
	u64 base;
	u64 entry;
	u64 size;
} opal;

/* One firmware-declared machine-check recoverable address range. */
struct mcheck_recoverable_range {
	u64 start_addr;
	u64 end_addr;
	u64 recover_addr;
};

/* Current number of entries on msg_list (bounded by OPAL_MSG_QUEUE_MAX). */
static int msg_list_size;

static struct mcheck_recoverable_range *mc_recoverable_range;
static int mc_recoverable_range_len;

struct device_node *opal_node;
/* Serializes atomic console writes in __opal_put_chars(). */
static DEFINE_SPINLOCK(opal_write_lock);
/* One notifier chain per OPAL message type. */
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static uint32_t opal_heartbeat;
static struct task_struct *kopald_tsk;
/* Receive buffer for opal_get_msg(); sized in opal_message_init(). */
static struct opal_msg *opal_msg;
static u32 opal_msg_size __ro_after_init;
/*
 * Ask OPAL to re-initialize all CPUs with the interrupt-endian (HILE)
 * flag matching the kernel's endianness and, on ARCH_300 CPUs, the
 * MMU mode flags, then re-apply CPU-specific SPR setup.
 */
void __init opal_configure_cores(void)
{
	u64 reinit_flags = 0;

	/* Do the actual re-init, This will clobber all FPRs, VRs, etc...
	 *
	 * It will preserve non volatile GPRs and HSPRG0/1. It will
	 * also restore HIDs and other SPRs to their original value
	 * but it might clobber a bunch.
	 */
#ifdef __BIG_ENDIAN__
	reinit_flags |= OPAL_REINIT_CPUS_HILE_BE;
#else
	reinit_flags |= OPAL_REINIT_CPUS_HILE_LE;
#endif

	/*
	 * POWER9 always support running hash:
	 *  ie. Host hash supports hash guests
	 *      Host radix supports hash/radix guests
	 */
	if (early_cpu_has_feature(CPU_FTR_ARCH_300)) {
		reinit_flags |= OPAL_REINIT_CPUS_MMU_HASH;
		if (early_radix_enabled())
			reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX;
	}

	opal_reinit_cpus(reinit_flags);

	/* Restore some bits */
	if (cur_cpu_spec->cpu_restore)
		cur_cpu_spec->cpu_restore();
}
/*
 * Flat-DT scan callback for the "ibm,opal" node: record the firmware
 * base/entry/runtime-size addresses and set FW_FEATURE_OPAL (panics on
 * pre-v3 OPAL). Returns 1 once the node has been processed so the
 * scan stops, 0 to keep scanning.
 */
int __init early_init_dt_scan_opal(unsigned long node,
				   const char *uname, int depth, void *data)
{
	const void *basep, *entryp, *sizep;
	int basesz, entrysz, runtimesz;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
	entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
	sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);

	if (!basep || !entryp || !sizep)
		return 1;

	/* Property sizes are in bytes; of_read_number() takes 32-bit cells. */
	opal.base = of_read_number(basep, basesz/4);
	opal.entry = of_read_number(entryp, entrysz/4);
	opal.size = of_read_number(sizep, runtimesz/4);

	pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%d)\n",
		 opal.base, basep, basesz);
	pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%d)\n",
		 opal.entry, entryp, entrysz);
	pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n",
		 opal.size, sizep, runtimesz);

	if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
		powerpc_firmware_features |= FW_FEATURE_OPAL;
		pr_debug("OPAL detected !\n");
	} else {
		panic("OPAL != V3 detected, no longer supported.\n");
	}

	return 1;
}
/*
 * Flat-DT scan callback: parse "mcheck-recoverable-ranges" from the
 * "ibm,opal" node into the mc_recoverable_range[] table used by the
 * early machine-check recovery path. Always returns 1 for the OPAL
 * node (done), 0 otherwise.
 */
int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
				const char *uname, int depth, void *data)
{
	int i, psize, size;
	const __be32 *prop;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);

	if (!prop)
		return 1;

	pr_debug("Found machine check recoverable ranges.\n");

	/*
	 * Calculate number of available entries.
	 *
	 * Each recoverable address range entry is (start address, len,
	 * recovery address), 2 cells each for start and recovery address,
	 * 1 cell for len, totalling 5 cells per entry.
	 */
	mc_recoverable_range_len = psize / (sizeof(*prop) * 5);

	/* Sanity check */
	if (!mc_recoverable_range_len)
		return 1;

	/* Size required to hold all the entries. */
	size = mc_recoverable_range_len *
			sizeof(struct mcheck_recoverable_range);

	/*
	 * Allocate a buffer to hold the MC recoverable ranges.
	 */
	mc_recoverable_range = memblock_alloc(size, __alignof__(u64));
	if (!mc_recoverable_range)
		panic("%s: Failed to allocate %u bytes align=0x%lx\n",
		      __func__, size, __alignof__(u64));

	for (i = 0; i < mc_recoverable_range_len; i++) {
		/* Cells 0-1: start, cell 2: length, cells 3-4: fixup addr. */
		mc_recoverable_range[i].start_addr =
					of_read_number(prop + (i * 5) + 0, 2);
		mc_recoverable_range[i].end_addr =
					mc_recoverable_range[i].start_addr +
					of_read_number(prop + (i * 5) + 2, 1);
		mc_recoverable_range[i].recover_addr =
					of_read_number(prop + (i * 5) + 3, 2);

		pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
				mc_recoverable_range[i].start_addr,
				mc_recoverable_range[i].end_addr,
				mc_recoverable_range[i].recover_addr);
	}
	return 1;
}
/*
 * Offer exception "glue" space at 0x7000 to OPAL for vectors that only
 * ancient firmware patches (HMI on pre-FW810.20, softpatch). Modern
 * firmware simply returns OPAL_UNSUPPORTED, making this a no-op.
 */
static int __init opal_register_exception_handlers(void)
{
#ifdef __BIG_ENDIAN__
	u64 glue;

	if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
		return -ENODEV;

	/* Hookup some exception handlers except machine check. We use the
	 * fwnmi area at 0x7000 to provide the glue space to OPAL
	 */
	glue = 0x7000;

	/*
	 * Only ancient OPAL firmware requires this.
	 * Specifically, firmware from FW810.00 (released June 2014)
	 * through FW810.20 (Released October 2014).
	 *
	 * Check if we are running on newer (post Oct 2014) firmware that
	 * exports the OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to
	 * patch the HMI interrupt and we catch it directly in Linux.
	 *
	 * For older firmware (i.e < FW810.20), we fallback to old behavior and
	 * let OPAL patch the HMI vector and handle it inside OPAL firmware.
	 *
	 * For newer firmware we catch/handle the HMI directly in Linux.
	 */
	if (!opal_check_token(OPAL_HANDLE_HMI)) {
		pr_info("Old firmware detected, OPAL handles HMIs.\n");
		opal_register_exception_handler(
				OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
				0, glue);
		glue += 128;
	}

	/*
	 * Only applicable to ancient firmware, all modern
	 * (post March 2015/skiboot 5.0) firmware will just return
	 * OPAL_UNSUPPORTED.
	 */
	opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
#endif

	return 0;
}
machine_early_initcall(powernv, opal_register_exception_handlers);
  208. static void queue_replay_msg(void *msg)
  209. {
  210. struct opal_msg_node *msg_node;
  211. if (msg_list_size < OPAL_MSG_QUEUE_MAX) {
  212. msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC);
  213. if (msg_node) {
  214. INIT_LIST_HEAD(&msg_node->list);
  215. memcpy(&msg_node->msg, msg, sizeof(struct opal_msg));
  216. list_add_tail(&msg_node->list, &msg_list);
  217. msg_list_size++;
  218. } else
  219. pr_warn_once("message queue no memory\n");
  220. if (msg_list_size >= OPAL_MSG_QUEUE_MAX)
  221. pr_warn_once("message queue full\n");
  222. }
  223. }
/*
 * Deliver every queued message of @msg_type to its (newly registered)
 * notifier chain, then free the queue entries. Called with
 * msg_list_lock held so replays stay ordered before new messages.
 */
static void dequeue_replay_msg(enum opal_msg_type msg_type)
{
	struct opal_msg_node *msg_node, *tmp;

	list_for_each_entry_safe(msg_node, tmp, &msg_list, list) {
		if (be32_to_cpu(msg_node->msg.msg_type) != msg_type)
			continue;

		atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
					msg_type,
					&msg_node->msg);

		list_del(&msg_node->list);
		kfree(msg_node);
		msg_list_size--;
	}
}
/*
 * Opal message notifier based on message type. Allow subscribers to get
 * notified for specific message type.
 *
 * Returns 0 on success or -EINVAL for a NULL block / out-of-range type.
 */
int opal_message_notifier_register(enum opal_msg_type msg_type,
					struct notifier_block *nb)
{
	int ret;
	unsigned long flags;

	if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) {
		pr_warn("%s: Invalid arguments, msg_type:%d\n",
			__func__, msg_type);
		return -EINVAL;
	}

	spin_lock_irqsave(&msg_list_lock, flags);
	ret = atomic_notifier_chain_register(
		&opal_msg_notifier_head[msg_type], nb);

	/*
	 * If the registration succeeded, replay any queued messages that came
	 * in prior to the notifier chain registration. msg_list_lock held here
	 * to ensure they're delivered prior to any subsequent messages.
	 */
	if (ret == 0)
		dequeue_replay_msg(msg_type);

	spin_unlock_irqrestore(&msg_list_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(opal_message_notifier_register);
  266. int opal_message_notifier_unregister(enum opal_msg_type msg_type,
  267. struct notifier_block *nb)
  268. {
  269. return atomic_notifier_chain_unregister(
  270. &opal_msg_notifier_head[msg_type], nb);
  271. }
  272. EXPORT_SYMBOL_GPL(opal_message_notifier_unregister);
/*
 * Route one incoming OPAL message: if no notifier has registered for
 * this type yet, queue it for later replay; otherwise fan it out on
 * the type's notifier chain (outside msg_list_lock).
 */
static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
	unsigned long flags;
	bool queued = false;

	spin_lock_irqsave(&msg_list_lock, flags);
	if (opal_msg_notifier_head[msg_type].head == NULL) {
		/*
		 * Queue up the msg since no notifiers have registered
		 * yet for this msg_type.
		 */
		queue_replay_msg(msg);
		queued = true;
	}
	spin_unlock_irqrestore(&msg_list_lock, flags);

	if (queued)
		return;

	/* notify subscribers */
	atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
					msg_type, msg);
}
/*
 * Fetch the next pending message from firmware into opal_msg and
 * dispatch it by type. OPAL_RESOURCE from opal_get_msg() means no
 * message is pending.
 */
static void opal_handle_message(void)
{
	s64 ret;
	u32 type;

	ret = opal_get_msg(__pa(opal_msg), opal_msg_size);
	/* No opal message pending. */
	if (ret == OPAL_RESOURCE)
		return;

	/* check for errors. */
	if (ret) {
		pr_warn("%s: Failed to retrieve opal message, err=%lld\n",
			__func__, ret);
		return;
	}

	type = be32_to_cpu(opal_msg->msg_type);

	/* Sanity check */
	if (type >= OPAL_MSG_TYPE_MAX) {
		pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
		return;
	}
	opal_message_do_notify(type, (void *)opal_msg);
}
/* Interrupt handler for the OPAL_EVENT_MSG_PENDING event irq. */
static irqreturn_t opal_message_notify(int irq, void *data)
{
	opal_handle_message();
	return IRQ_HANDLED;
}
  320. static int __init opal_message_init(struct device_node *opal_node)
  321. {
  322. int ret, i, irq;
  323. ret = of_property_read_u32(opal_node, "opal-msg-size", &opal_msg_size);
  324. if (ret) {
  325. pr_notice("Failed to read opal-msg-size property\n");
  326. opal_msg_size = sizeof(struct opal_msg);
  327. }
  328. opal_msg = kmalloc(opal_msg_size, GFP_KERNEL);
  329. if (!opal_msg) {
  330. opal_msg_size = sizeof(struct opal_msg);
  331. /* Try to allocate fixed message size */
  332. opal_msg = kmalloc(opal_msg_size, GFP_KERNEL);
  333. BUG_ON(opal_msg == NULL);
  334. }
  335. for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
  336. ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);
  337. irq = opal_event_request(ilog2(OPAL_EVENT_MSG_PENDING));
  338. if (!irq) {
  339. pr_err("%s: Can't register OPAL event irq (%d)\n",
  340. __func__, irq);
  341. return irq;
  342. }
  343. ret = request_irq(irq, opal_message_notify,
  344. IRQ_TYPE_LEVEL_HIGH, "opal-msg", NULL);
  345. if (ret) {
  346. pr_err("%s: Can't request OPAL event irq (%d)\n",
  347. __func__, ret);
  348. return ret;
  349. }
  350. return 0;
  351. }
/*
 * Read up to @count bytes from OPAL console @vtermno into @buf.
 * Returns the number of bytes read, 0 when no input is pending (or on
 * read failure), or -ENODEV when OPAL is not present.
 */
int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
	s64 rc;
	__be64 evt, len;

	if (!opal.entry)
		return -ENODEV;
	opal_poll_events(&evt);
	if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
		return 0;
	len = cpu_to_be64(count);
	rc = opal_console_read(vtermno, &len, buf);
	/* On success OPAL updates len to the count actually read. */
	if (rc == OPAL_SUCCESS)
		return be64_to_cpu(len);
	return 0;
}
/*
 * Write @total_len bytes to OPAL console @vtermno. When @atomic, the
 * whole sequence is serialized under opal_write_lock and the write is
 * only attempted if buffer space for the full length is available.
 * Returns bytes written, -EAGAIN when the console is busy/full, or a
 * negative errno for other failures.
 */
static int __opal_put_chars(uint32_t vtermno, const char *data, int total_len, bool atomic)
{
	unsigned long flags = 0 /* shut up gcc */;
	int written;
	__be64 olen;
	s64 rc;

	if (!opal.entry)
		return -ENODEV;

	if (atomic)
		spin_lock_irqsave(&opal_write_lock, flags);
	rc = opal_console_write_buffer_space(vtermno, &olen);
	if (rc || be64_to_cpu(olen) < total_len) {
		/* Closed -> drop characters */
		if (rc)
			written = total_len;
		else
			written = -EAGAIN;
		goto out;
	}

	/* Should not get a partial write here because space is available. */
	olen = cpu_to_be64(total_len);
	rc = opal_console_write(vtermno, &olen, data);
	if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		/* BUSY_EVENT asks us to run the event poller before retry. */
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		written = -EAGAIN;
		goto out;
	}

	/* Closed or other error drop */
	if (rc != OPAL_SUCCESS) {
		written = opal_error_code(rc);
		goto out;
	}

	/* OPAL updates olen to the count actually consumed. */
	written = be64_to_cpu(olen);
	if (written < total_len) {
		if (atomic) {
			/* Should not happen */
			pr_warn("atomic console write returned partial "
				"len=%d written=%d\n", total_len, written);
		}
		if (!written)
			written = -EAGAIN;
	}

out:
	if (atomic)
		spin_unlock_irqrestore(&opal_write_lock, flags);

	return written;
}
/* Non-atomic console write; may report a partial count or -EAGAIN. */
int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
	return __opal_put_chars(vtermno, data, total_len, false);
}
/*
 * opal_put_chars_atomic will not perform partial-writes. Data will be
 * atomically written to the terminal or not at all. This is not strictly
 * true at the moment because console space can race with OPAL's console
 * writes.
 */
int opal_put_chars_atomic(uint32_t vtermno, const char *data, int total_len)
{
	return __opal_put_chars(vtermno, data, total_len, true);
}
/*
 * Attempt one flush of console @vtermno. Returns OPAL_SUCCESS when
 * flushed, OPAL_BUSY (or OPAL_PARTIAL from the firmware call) when more
 * flushing is needed, or another OPAL error code. Falls back to event
 * polling on firmware that lacks OPAL_CONSOLE_FLUSH.
 */
static s64 __opal_flush_console(uint32_t vtermno)
{
	s64 rc;

	if (!opal_check_token(OPAL_CONSOLE_FLUSH)) {
		__be64 evt;

		/*
		 * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
		 * the console can still be flushed by calling the polling
		 * function while it has OPAL_EVENT_CONSOLE_OUTPUT events.
		 */
		WARN_ONCE(1, "opal: OPAL_CONSOLE_FLUSH missing.\n");

		opal_poll_events(&evt);
		if (!(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT))
			return OPAL_SUCCESS;
		return OPAL_BUSY;

	} else {
		rc = opal_console_flush(vtermno);
		if (rc == OPAL_BUSY_EVENT) {
			opal_poll_events(NULL);
			rc = OPAL_BUSY;
		}
		return rc;
	}
}
  453. /*
  454. * opal_flush_console spins until the console is flushed
  455. */
  456. int opal_flush_console(uint32_t vtermno)
  457. {
  458. for (;;) {
  459. s64 rc = __opal_flush_console(vtermno);
  460. if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) {
  461. mdelay(1);
  462. continue;
  463. }
  464. return opal_error_code(rc);
  465. }
  466. }
/*
 * opal_flush_chars is an hvc interface that sleeps until the console is
 * flushed if wait, otherwise it will return -EBUSY if the console has data,
 * -EAGAIN if it has data and some of it was flushed.
 */
int opal_flush_chars(uint32_t vtermno, bool wait)
{
	for (;;) {
		s64 rc = __opal_flush_console(vtermno);

		if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) {
			if (wait) {
				msleep(OPAL_BUSY_DELAY_MS);
				continue;
			}
			if (rc == OPAL_PARTIAL)
				return -EAGAIN;
		}

		/* OPAL_BUSY presumably maps to -EBUSY via opal_error_code() */
		return opal_error_code(rc);
	}
}
/*
 * Decide whether a machine check is survivable. Returns 1 when the
 * platform corrected the error or the offending context was killed,
 * 0 when the caller must take the system down.
 */
static int opal_recover_mce(struct pt_regs *regs,
					struct machine_check_event *evt)
{
	int recovered = 0;

	if (regs_is_unrecoverable(regs)) {
		/* If MSR_RI isn't set, we cannot recover */
		pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
		recovered = 0;
	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
		/* Platform corrected itself */
		recovered = 1;
	} else if (evt->severity == MCE_SEV_FATAL) {
		/* Fatal machine check */
		pr_err("Machine check interrupt is fatal\n");
		recovered = 0;
	}

	if (!recovered && evt->sync_error) {
		/*
		 * Try to kill processes if we get a synchronous machine check
		 * (e.g., one caused by execution of this instruction). This
		 * will devolve into a panic if we try to kill init or are in
		 * an interrupt etc.
		 *
		 * TODO: Queue up this address for hwpoisioning later.
		 * TODO: This is not quite right for d-side machine
		 *       checks ->nip is not necessarily the important
		 *       address.
		 */
		if ((user_mode(regs))) {
			_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
			recovered = 1;
		} else if (die_will_crash()) {
			/*
			 * die() would kill the kernel, so better to go via
			 * the platform reboot code that will log the
			 * machine check.
			 */
			recovered = 0;
		} else {
			die_mce("Machine check", regs, SIGBUS);
			recovered = 1;
		}
	}

	return recovered;
}
/*
 * Log an unrecoverable platform error and request a platform-error
 * reboot from OPAL; panic if firmware returns (or doesn't support it).
 */
void __noreturn pnv_platform_error_reboot(struct pt_regs *regs, const char *msg)
{
	panic_flush_kmsg_start();

	pr_emerg("Hardware platform error: %s\n", msg);
	if (regs)
		show_regs(regs);
	smp_send_stop();

	panic_flush_kmsg_end();

	/*
	 * Don't bother to shut things down because this will
	 * xstop the system.
	 */
	if (opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, msg)
						== OPAL_UNSUPPORTED) {
		pr_emerg("Reboot type %d not supported for %s\n",
				OPAL_REBOOT_PLATFORM_ERROR, msg);
	}

	/*
	 * We reached here. There can be four possibilities:
	 * 1. We are running on a firmware level that do not support
	 *    opal_cec_reboot2()
	 * 2. We are running on a firmware level that do not support
	 *    OPAL_REBOOT_PLATFORM_ERROR reboot type.
	 * 3. We are running on FSP based system that does not need
	 *    opal to trigger checkstop explicitly for error analysis.
	 *    The FSP PRD component would have already got notified
	 *    about this error through other channels.
	 * 4. We are running on a newer skiboot that by default does
	 *    not cause a checkstop, drops us back to the kernel to
	 *    extract context and state at the time of the error.
	 */

	panic(msg);
}
/*
 * Virtual-mode machine check handler: consume the queued MCE event,
 * print it, and either recover (return 1) or reboot the platform.
 */
int opal_machine_check(struct pt_regs *regs)
{
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return 0;

	/* Print things out */
	if (evt.version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt.version);
		return 0;
	}
	machine_check_print_event_info(&evt, user_mode(regs), false);

	if (opal_recover_mce(regs, &evt))
		return 1;

	pnv_platform_error_reboot(regs, "Unrecoverable Machine Check exception");
}
/* Early hmi handler called in real mode. */
int opal_hmi_exception_early(struct pt_regs *regs)
{
	s64 rc;

	/*
	 * call opal hmi handler.
	 * The return value OPAL_SUCCESS is an indication that there is
	 * an HMI event generated waiting to pull by Linux.
	 */
	rc = opal_handle_hmi();
	if (rc == OPAL_SUCCESS) {
		local_paca->hmi_event_available = 1;
		return 1;
	}
	return 0;
}
/* Early HMI handler (v2): uses OPAL's flag-returning interface. */
int opal_hmi_exception_early2(struct pt_regs *regs)
{
	s64 rc;
	__be64 out_flags;

	/*
	 * call opal hmi handler.
	 * Check 64-bit flag mask to find out if an event was generated,
	 * and whether TB is still valid or not etc.
	 */
	rc = opal_handle_hmi2(&out_flags);
	if (rc != OPAL_SUCCESS)
		return 0;

	if (be64_to_cpu(out_flags) & OPAL_HMI_FLAGS_NEW_EVENT)
		local_paca->hmi_event_available = 1;
	if (be64_to_cpu(out_flags) & OPAL_HMI_FLAGS_TOD_TB_FAIL)
		tb_invalid = true;
	return 1;
}
/* HMI exception handler called in virtual mode when irqs are next enabled. */
int opal_handle_hmi_exception(struct pt_regs *regs)
{
	/*
	 * Check if HMI event is available.
	 * if Yes, then wake kopald to process them.
	 */
	if (!local_paca->hmi_event_available)
		return 0;

	local_paca->hmi_event_available = 0;
	opal_wake_poller();

	return 1;
}
  628. static uint64_t find_recovery_address(uint64_t nip)
  629. {
  630. int i;
  631. for (i = 0; i < mc_recoverable_range_len; i++)
  632. if ((nip >= mc_recoverable_range[i].start_addr) &&
  633. (nip < mc_recoverable_range[i].end_addr))
  634. return mc_recoverable_range[i].recover_addr;
  635. return 0;
  636. }
/*
 * Early machine-check hook: if the faulting NIP lies within the OPAL
 * firmware image and inside a declared recoverable range, redirect
 * execution to the matching fixup address. Returns true if redirected.
 */
bool opal_mce_check_early_recovery(struct pt_regs *regs)
{
	uint64_t recover_addr = 0;

	if (!opal.base || !opal.size)
		goto out;

	if ((regs->nip >= opal.base) &&
			(regs->nip < (opal.base + opal.size)))
		recover_addr = find_recovery_address(regs->nip);

	/*
	 * Setup regs->nip to rfi into fixup address.
	 */
	if (recover_addr)
		regs_set_return_ip(regs, recover_addr);

out:
	return !!recover_addr;
}
  653. static int __init opal_sysfs_init(void)
  654. {
  655. opal_kobj = kobject_create_and_add("opal", firmware_kobj);
  656. if (!opal_kobj) {
  657. pr_warn("kobject_create_and_add opal failed\n");
  658. return -ENOMEM;
  659. }
  660. return 0;
  661. }
/* sysfs ->read for exported regions: serve bytes from the backing buffer. */
static ssize_t export_attr_read(struct file *fp, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	return memory_read_from_buffer(buf, count, &off, bin_attr->private,
				       bin_attr->size);
}
/*
 * Create a root-only (0400) sysfs bin file @export_name under @parent
 * exposing the memory region described by the two-u64 (addr, size)
 * property @prop_name of @np. Returns 0 or a negative errno.
 */
static int opal_add_one_export(struct kobject *parent, const char *export_name,
			       struct device_node *np, const char *prop_name)
{
	struct bin_attribute *attr = NULL;
	const char *name = NULL;
	u64 vals[2];
	int rc;

	rc = of_property_read_u64_array(np, prop_name, &vals[0], 2);
	if (rc)
		goto out;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr) {
		rc = -ENOMEM;
		goto out;
	}
	name = kstrdup(export_name, GFP_KERNEL);
	if (!name) {
		rc = -ENOMEM;
		goto out;
	}

	sysfs_bin_attr_init(attr);
	attr->attr.name = name;
	attr->attr.mode = 0400;
	attr->read = export_attr_read;
	attr->private = __va(vals[0]);	/* vals[0] is treated as a phys addr */
	attr->size = vals[1];

	rc = sysfs_create_bin_file(parent, attr);
out:
	/* On any failure free both allocations (kfree(NULL) is a no-op). */
	if (rc) {
		kfree(name);
		kfree(attr);
	}

	return rc;
}
/*
 * Recursively mirror @np's properties (and those of its children) as
 * sysfs bin files under @kobj, skipping "name" and "phandle".
 */
static void opal_add_exported_attrs(struct device_node *np,
				    struct kobject *kobj)
{
	struct device_node *child;
	struct property *prop;

	for_each_property_of_node(np, prop) {
		int rc;

		if (!strcmp(prop->name, "name") ||
		    !strcmp(prop->name, "phandle"))
			continue;

		rc = opal_add_one_export(kobj, prop->name, np, prop->name);
		if (rc) {
			pr_warn("Unable to add export %pOF/%s, rc = %d!\n",
				np, prop->name, rc);
		}
	}

	for_each_child_of_node(np, child) {
		struct kobject *child_kobj;

		child_kobj = kobject_create_and_add(child->name, kobj);
		if (!child_kobj) {
			pr_err("Unable to create export dir for %pOF\n", child);
			continue;
		}

		opal_add_exported_attrs(child, child_kobj);
	}
}
/*
 * opal_export_attrs: creates a sysfs node for each property listed in
 * the device-tree under /ibm,opal/firmware/exports/
 * All new sysfs nodes are created under /opal/exports/.
 * This allows for reserved memory regions (e.g. HDAT) to be read.
 * The new sysfs nodes are only readable by root.
 */
static void opal_export_attrs(void)
{
	struct device_node *np;
	struct kobject *kobj;
	int rc;

	np = of_find_node_by_path("/ibm,opal/firmware/exports");
	if (!np)
		return;

	/* Create new 'exports' directory - /sys/firmware/opal/exports */
	kobj = kobject_create_and_add("exports", opal_kobj);
	if (!kobj) {
		pr_warn("kobject_create_and_add() of exports failed\n");
		of_node_put(np);
		return;
	}

	opal_add_exported_attrs(np, kobj);

	/*
	 * NB: symbol_map existed before the generic export interface so it
	 * lives under the top level opal_kobj.
	 */
	rc = opal_add_one_export(opal_kobj, "symbol_map",
				 np->parent, "symbol-map");
	if (rc)
		pr_warn("Error %d creating OPAL symbols file\n", rc);

	of_node_put(np);
}
/*
 * Register the kernel log buffer with OPAL so firmware can include it
 * in a platform dump.
 */
static void __init opal_dump_region_init(void)
{
	void *addr;
	uint64_t size;
	int rc;

	if (!opal_check_token(OPAL_REGISTER_DUMP_REGION))
		return;

	/* Register kernel log buffer */
	addr = log_buf_addr_get();
	if (addr == NULL)
		return;

	size = log_buf_len_get();
	if (size == 0)
		return;

	rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
				       __pa(addr), size);
	/* Don't warn if this is just an older OPAL that doesn't
	 * know about that call
	 */
	if (rc && rc != OPAL_UNSUPPORTED)
		pr_warn("DUMP: Failed to register kernel log buffer. "
			"rc = %d\n", rc);
}
/* Create a platform device for every DT node matching @compatible. */
static void __init opal_pdev_init(const char *compatible)
{
	struct device_node *np;

	for_each_compatible_node(np, NULL, compatible)
		of_platform_device_create(np, NULL, NULL);
}
  791. static void __init opal_imc_init_dev(void)
  792. {
  793. struct device_node *np;
  794. np = of_find_compatible_node(NULL, NULL, IMC_DTB_COMPAT);
  795. if (np)
  796. of_platform_device_create(np, NULL, NULL);
  797. of_node_put(np);
  798. }
/*
 * OPAL heartbeat kthread: handle pending OPAL events roughly every
 * opal_heartbeat ms, waking early when opal_wake_poller() runs or
 * events are already pending.
 */
static int kopald(void *unused)
{
	unsigned long timeout = msecs_to_jiffies(opal_heartbeat) + 1;

	set_freezable();
	do {
		try_to_freeze();

		opal_handle_events();

		set_current_state(TASK_INTERRUPTIBLE);
		if (opal_have_pending_events())
			__set_current_state(TASK_RUNNING);
		else
			schedule_timeout(timeout);

	} while (!kthread_should_stop());

	return 0;
}
  814. void opal_wake_poller(void)
  815. {
  816. if (kopald_tsk)
  817. wake_up_process(kopald_tsk);
  818. }
/* Start kopald when firmware requests a heartbeat via "ibm,heartbeat-ms". */
static void __init opal_init_heartbeat(void)
{
	/* Old firmware, we assume the HVC heartbeat is sufficient */
	if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
				 &opal_heartbeat) != 0)
		opal_heartbeat = 0;

	if (opal_heartbeat)
		kopald_tsk = kthread_run(kopald, NULL, "kopald");
}
/*
 * Main OPAL platform initialisation, run as a subsys initcall on
 * powernv machines.  Discovers the /ibm,opal device-tree node and
 * brings up the OPAL sub-services in dependency order.
 *
 * Returns 0 on success, -ENODEV when the OPAL node is absent.
 */
static int __init opal_init(void)
{
	struct device_node *np, *consoles, *leds;
	int rc;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("Device node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	consoles = of_find_node_by_path("/ibm,opal/consoles");
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			/* Only "serial" children are console ports. */
			if (!of_node_name_eq(np, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Initialise OPAL messaging system */
	opal_message_init(opal_node);

	/* Initialise OPAL asynchronous completion interface */
	opal_async_comp_init();

	/* Initialise OPAL sensor interface */
	opal_sensor_init();

	/* Initialise OPAL hypervisor maintenance interrupt handling */
	opal_hmi_handler_init();

	/* Create i2c platform devices */
	opal_pdev_init("ibm,opal-i2c");

	/* Handle non-volatile memory devices */
	opal_pdev_init("pmem-region");

	/* Setup a heartbeat thread if requested by OPAL */
	opal_init_heartbeat();

	/* Detect In-Memory Collection counters and create devices */
	opal_imc_init_dev();

	/* Create leds platform devices */
	leds = of_find_node_by_path("/ibm,opal/leds");
	if (leds) {
		of_platform_device_create(leds, "opal_leds", NULL);
		of_node_put(leds);
	}

	/* Initialise OPAL message log interface */
	opal_msglog_init();

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Setup dump region interface */
		opal_dump_region_init();
		/* Setup error log interface */
		/* NOTE(review): rc from opal_elog_init() is never checked. */
		rc = opal_elog_init();
		/* Setup code update interface */
		opal_flash_update_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log sysfs interface. */
		opal_msglog_sysfs_init();
		/* Add all export properties */
		opal_export_attrs();
	}

	/* Initialize platform devices: IPMI backend, PRD & flash interface */
	opal_pdev_init("ibm,opal-ipmi");
	opal_pdev_init("ibm,opal-flash");
	opal_pdev_init("ibm,opal-prd");

	/* Initialise platform device: oppanel interface */
	opal_pdev_init("ibm,opal-oppanel");

	/* Initialise OPAL kmsg dumper for flushing console on panic */
	opal_kmsg_init();

	/* Initialise OPAL powercap interface */
	opal_powercap_init();

	/* Initialise OPAL Power-Shifting-Ratio interface */
	opal_psr_init();

	/* Initialise OPAL sensor groups */
	opal_sensor_groups_init();

	/* Initialise OPAL Power control interface */
	opal_power_control_init();

	/* Initialize OPAL secure variables */
	opal_pdev_init("ibm,secvar-backend");

	return 0;
}
machine_subsys_initcall(powernv, opal_init);
  910. void opal_shutdown(void)
  911. {
  912. long rc = OPAL_BUSY;
  913. opal_event_shutdown();
  914. /*
  915. * Then sync with OPAL which ensure anything that can
  916. * potentially write to our memory has completed such
  917. * as an ongoing dump retrieval
  918. */
  919. while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
  920. rc = opal_sync_host_reboot();
  921. if (rc == OPAL_BUSY)
  922. opal_poll_events(NULL);
  923. else
  924. mdelay(10);
  925. }
  926. /* Unregister memory dump region */
  927. if (opal_check_token(OPAL_UNREGISTER_DUMP_REGION))
  928. opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
  929. }
/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
/* XSCOM access for hardware diagnostic drivers */
EXPORT_SYMBOL_GPL(opal_xscom_read);
EXPORT_SYMBOL_GPL(opal_xscom_write);
/* IPMI transport used by the OPAL IPMI backend driver */
EXPORT_SYMBOL_GPL(opal_ipmi_send);
EXPORT_SYMBOL_GPL(opal_ipmi_recv);
/* Flash access used by the OPAL MTD/flash driver */
EXPORT_SYMBOL_GPL(opal_flash_read);
EXPORT_SYMBOL_GPL(opal_flash_write);
EXPORT_SYMBOL_GPL(opal_flash_erase);
/* Processor runtime diagnostics messaging */
EXPORT_SYMBOL_GPL(opal_prd_msg);
EXPORT_SYMBOL_GPL(opal_check_token);
/* Convert a region of vmalloc memory to an opal sg list */
/*
 * Build a chained OPAL scatter-gather list describing a vmalloc'd
 * buffer, page by page.  Each PAGE_SIZE node holds a 16-byte header
 * (length + physical next pointer) followed by SG_ENTRIES_PER_NODE
 * entries; all fields are big-endian and addresses are physical, as
 * OPAL consumes the list directly.
 *
 * Returns the head of the list, or NULL on allocation failure (any
 * partially built list is freed).  Caller frees with
 * opal_free_sg_list().
 */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
					     unsigned long vmalloc_size)
{
	struct opal_sg_list *sg, *first = NULL;
	unsigned long i = 0;

	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg)
		goto nomem;

	first = sg;

	while (vmalloc_size > 0) {
		/* vmalloc pages are not physically contiguous: translate
		 * each page individually to its physical address. */
		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
		uint64_t length = min(vmalloc_size, PAGE_SIZE);

		sg->entry[i].data = cpu_to_be64(data);
		sg->entry[i].length = cpu_to_be64(length);
		i++;

		/* Node full: chain a fresh node via its physical address. */
		if (i >= SG_ENTRIES_PER_NODE) {
			struct opal_sg_list *next;

			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!next)
				goto nomem;

			/* +16 accounts for the node header itself. */
			sg->length = cpu_to_be64(
					i * sizeof(struct opal_sg_entry) + 16);
			i = 0;
			sg->next = cpu_to_be64(__pa(next));
			sg = next;
		}

		vmalloc_addr += length;
		vmalloc_size -= length;
	}

	/* Finalise the (possibly partially filled) last node's length. */
	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

	return first;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	opal_free_sg_list(first);
	return NULL;
}
  978. void opal_free_sg_list(struct opal_sg_list *sg)
  979. {
  980. while (sg) {
  981. uint64_t next = be64_to_cpu(sg->next);
  982. kfree(sg);
  983. if (next)
  984. sg = __va(next);
  985. else
  986. sg = NULL;
  987. }
  988. }
  989. int opal_error_code(int rc)
  990. {
  991. switch (rc) {
  992. case OPAL_SUCCESS: return 0;
  993. case OPAL_PARAMETER: return -EINVAL;
  994. case OPAL_ASYNC_COMPLETION: return -EINPROGRESS;
  995. case OPAL_BUSY:
  996. case OPAL_BUSY_EVENT: return -EBUSY;
  997. case OPAL_NO_MEM: return -ENOMEM;
  998. case OPAL_PERMISSION: return -EPERM;
  999. case OPAL_UNSUPPORTED: return -EIO;
  1000. case OPAL_HARDWARE: return -EIO;
  1001. case OPAL_INTERNAL_ERROR: return -EIO;
  1002. case OPAL_TIMEOUT: return -ETIMEDOUT;
  1003. default:
  1004. pr_err("%s: unexpected OPAL error %d\n", __func__, rc);
  1005. return -EIO;
  1006. }
  1007. }
  1008. void powernv_set_nmmu_ptcr(unsigned long ptcr)
  1009. {
  1010. int rc;
  1011. if (firmware_has_feature(FW_FEATURE_OPAL)) {
  1012. rc = opal_nmmu_set_ptcr(-1UL, ptcr);
  1013. if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
  1014. pr_warn("%s: Unable to set nest mmu ptcr\n", __func__);
  1015. }
  1016. }
/* Export these for the OPAL RTC and i2c drivers */
EXPORT_SYMBOL_GPL(opal_poll_events);
EXPORT_SYMBOL_GPL(opal_rtc_read);
EXPORT_SYMBOL_GPL(opal_rtc_write);
EXPORT_SYMBOL_GPL(opal_tpo_read);
EXPORT_SYMBOL_GPL(opal_tpo_write);
EXPORT_SYMBOL_GPL(opal_i2c_request);
/* Export these symbols for PowerNV LED class driver */
EXPORT_SYMBOL_GPL(opal_leds_get_ind);
EXPORT_SYMBOL_GPL(opal_leds_set_ind);
/* Export this symbol for PowerNV Operator Panel class driver */
EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
/* Export this for KVM */
EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
EXPORT_SYMBOL_GPL(opal_int_eoi);
EXPORT_SYMBOL_GPL(opal_error_code);
/* Export the below symbol for NX compression */
EXPORT_SYMBOL(opal_nx_coproc_init);