// SPDX-License-Identifier: GPL-2.0-only
/*
 *  ARM64 Specific Low-Level ACPI Boot Support
 *
 *  Copyright (C) 2013-2014, Linaro Ltd.
 *	Author: Al Stone <al.stone@linaro.org>
 *	Author: Graeme Gregory <graeme.gregory@linaro.org>
 *	Author: Hanjun Guo <hanjun.guo@linaro.org>
 *	Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
 *	Author: Naresh Bhat <naresh.bhat@linaro.org>
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irq_work.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/smp.h>
#include <linux/serial_core.h>
#include <linux/pgtable.h>

#include <acpi/ghes.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/smp_plat.h>

int acpi_noirq = 1;		/* skip ACPI IRQ initialization */
int acpi_disabled = 1;
EXPORT_SYMBOL(acpi_disabled);

int acpi_pci_disabled = 1;	/* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);

static bool param_acpi_off __initdata;
static bool param_acpi_on __initdata;
static bool param_acpi_force __initdata;
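
/*
 * Handle the "acpi=" early parameter (summarized from the handling below and
 * in acpi_boot_table_init()):
 *
 *	acpi=off	stay on DT; ACPI table parsing and the interpreter
 *			are disabled
 *	acpi=on		prefer ACPI over DT, provided the tables pass the
 *			sanity checks
 *	acpi=force	keep ACPI enabled even if its initialization fails
 */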
static int __init parse_acpi(char *arg)
{
        if (!arg)
                return -EINVAL;

        /* "acpi=off" disables both ACPI table parsing and interpreter */
        if (strcmp(arg, "off") == 0)
                param_acpi_off = true;
        else if (strcmp(arg, "on") == 0) /* prefer ACPI over DT */
                param_acpi_on = true;
        else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
                param_acpi_force = true;
        else
                return -EINVAL; /* Core will print when we return error */

        return 0;
}
early_param("acpi", parse_acpi);
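
/*
 * A DT is considered a stub if it carries nothing beyond a /chosen node
 * (plus, when running on Xen, a "xen,xen"-compatible /hypervisor node);
 * typically this is the minimal DT a bootloader or the EFI stub hands over
 * on an ACPI-only system.
 */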
static bool __init dt_is_stub(void)
{
        int node;

        fdt_for_each_subnode(node, initial_boot_params, 0) {
                const char *name = fdt_get_name(initial_boot_params, node, NULL);

                if (strcmp(name, "chosen") == 0)
                        continue;
                if (strcmp(name, "hypervisor") == 0 &&
                    of_flat_dt_is_compatible(node, "xen,xen"))
                        continue;

                return false;
        }

        return true;
}

/*
 * __acpi_map_table() will be called before paging_init(), so early_ioremap()
 * or early_memremap() should be called here for ACPI table mapping.
 */
void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
        if (!size)
                return NULL;

        return early_memremap(phys, size);
}

void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
        if (!map || !size)
                return;

        early_memunmap(map, size);
}
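
/* Whether the FADT ARM boot flags advertise PSCI-compliant firmware */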
bool __init acpi_psci_present(void)
{
        return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
}

/* Whether HVC must be used instead of SMC as the PSCI conduit */
bool acpi_psci_use_hvc(void)
{
        return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
}

/*
 * acpi_fadt_sanity_check() - Check FADT presence and carry out sanity
 *                            checks on it
 *
 * Return 0 on success, <0 on failure
 */
static int __init acpi_fadt_sanity_check(void)
{
        struct acpi_table_header *table;
        struct acpi_table_fadt *fadt;
        acpi_status status;
        int ret = 0;

        /*
         * FADT is required on arm64; retrieve it to check its presence
         * and carry out revision and ACPI HW reduced compliance tests
         */
        status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
        if (ACPI_FAILURE(status)) {
                const char *msg = acpi_format_exception(status);

                pr_err("Failed to get FADT table, %s\n", msg);
                return -ENODEV;
        }

        fadt = (struct acpi_table_fadt *)table;

        /*
         * The revision in the table header is the FADT major revision; a
         * minor revision field was introduced by ACPI 5.1. We only deal
         * with ACPI 5.1 or newer revisions to get GIC and SMP boot
         * protocol configuration data.
         */
        if (table->revision < 5 ||
            (table->revision == 5 && fadt->minor_revision < 1)) {
                pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
                       table->revision, fadt->minor_revision);

                if (!fadt->arm_boot_flags) {
                        ret = -EINVAL;
                        goto out;
                }
                pr_err("FADT has ARM boot flags set, assuming 5.1\n");
        }

        if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
                pr_err("FADT not ACPI hardware reduced compliant\n");
                ret = -EINVAL;
        }

out:
        /*
         * acpi_get_table() creates a FADT table mapping that
         * should be released after parsing and before resuming boot
         */
        acpi_put_table(table);
        return ret;
}

/*
 * acpi_boot_table_init() - called from setup_arch(), always.
 *	1. find RSDP and get its address, and then find XSDT
 *	2. extract all tables and checksum them all
 *	3. check ACPI FADT revision
 *	4. check ACPI FADT HW reduced flag
 *
 * We can parse ACPI boot-time tables such as MADT after
 * this function is called.
 *
 * On return ACPI is enabled if either:
 *
 * - ACPI tables are initialized and sanity checks passed
 * - acpi=force was passed in the command line and ACPI was not disabled
 *   explicitly through the acpi=off command line parameter
 *
 * ACPI is disabled on function return otherwise
 */
void __init acpi_boot_table_init(void)
{
        /*
         * Enable ACPI instead of device tree unless:
         * - ACPI has been disabled explicitly (acpi=off), or
         * - the device tree is not empty (it has more than just a /chosen node,
         *   and a /hypervisor node when running on Xen)
         *   and ACPI has not been [force] enabled (acpi=on|force)
         */
        if (param_acpi_off ||
            (!param_acpi_on && !param_acpi_force && !dt_is_stub()))
                goto done;

        /*
         * ACPI is disabled at this point. Enable it in order to parse
         * the ACPI tables and carry out sanity checks
         */
        enable_acpi();

        /*
         * If ACPI tables are initialized and FADT sanity checks passed,
         * leave ACPI enabled and carry on booting; otherwise disable ACPI
         * on initialization error.
         * If acpi=force was passed on the command line it forces ACPI
         * to be enabled even if its initialization failed.
         */
        if (acpi_table_init() || acpi_fadt_sanity_check()) {
                pr_err("Failed to init ACPI tables\n");
                if (!param_acpi_force)
                        disable_acpi();
        }

done:
        if (acpi_disabled) {
                if (earlycon_acpi_spcr_enable)
                        early_init_dt_scan_chosen_stdout();
        } else {
                acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
                if (IS_ENABLED(CONFIG_ACPI_BGRT))
                        acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
        }
}

static pgprot_t __acpi_get_writethrough_mem_attribute(void)
{
        /*
         * Although UEFI specifies the use of Normal Write-through for
         * EFI_MEMORY_WT, it is seldom used in practice and not implemented
         * by most (all?) CPUs. Rather than allocate a MAIR just for this
         * purpose, emit a warning and use Normal Non-cacheable instead.
         */
        pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n");
        return __pgprot(PROT_NORMAL_NC);
}
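
/*
 * Derive the page protection for a physical address from the EFI memory
 * attributes of the region containing it: Write-back maps to PAGE_KERNEL,
 * Write-combine to Normal Non-cacheable, Write-through to the fallback
 * above, and anything else (or memory unknown to EFI) to Device-nGnRnE.
 */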
pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
{
        /*
         * According to "Table 8 Map: EFI memory types to AArch64 memory
         * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
         * mapped to a corresponding MAIR attribute encoding.
         * The EFI memory attribute advises all possible capabilities
         * of a memory region.
         */
        u64 attr;

        attr = efi_mem_attributes(addr);
        if (attr & EFI_MEMORY_WB)
                return PAGE_KERNEL;
        if (attr & EFI_MEMORY_WC)
                return __pgprot(PROT_NORMAL_NC);
        if (attr & EFI_MEMORY_WT)
                return __acpi_get_writethrough_mem_attribute();
        return __pgprot(PROT_DEVICE_nGnRnE);
}
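
/*
 * Map a physical region on behalf of ACPI (e.g. for table or AML opregion
 * access). When an EFI memory map is available it is consulted so that the
 * mapping attributes and write protection match the type of memory being
 * mapped; regions not described by the map are treated as device memory.
 */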
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
        efi_memory_desc_t *md, *region = NULL;
        pgprot_t prot;

        if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
                return NULL;

        for_each_efi_memory_desc(md) {
                u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

                if (phys < md->phys_addr || phys >= end)
                        continue;

                if (phys + size > end) {
                        pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
                        return NULL;
                }
                region = md;
                break;
        }

        /*
         * It is fine for AML to remap regions that are not represented in the
         * EFI memory map at all, as it only describes normal memory, and MMIO
         * regions that require a virtual mapping to make them accessible to
         * the EFI runtime services.
         */
        prot = __pgprot(PROT_DEVICE_nGnRnE);
        if (region) {
                switch (region->type) {
                case EFI_LOADER_CODE:
                case EFI_LOADER_DATA:
                case EFI_BOOT_SERVICES_CODE:
                case EFI_BOOT_SERVICES_DATA:
                case EFI_CONVENTIONAL_MEMORY:
                case EFI_PERSISTENT_MEMORY:
                        if (memblock_is_map_memory(phys) ||
                            !memblock_is_region_memory(phys, size)) {
                                pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
                                return NULL;
                        }

                        /*
                         * Mapping kernel memory is permitted if the region in
                         * question is covered by a single memblock with the
                         * NOMAP attribute set: this enables the use of ACPI
                         * table overrides passed via initramfs, which are
                         * reserved in memory using arch_reserve_mem_area()
                         * below. As this particular use case only requires
                         * read access, fall through to the R/O mapping case.
                         */
                        fallthrough;

                case EFI_RUNTIME_SERVICES_CODE:
                        /*
                         * This would be unusual, but not problematic per se,
                         * as long as we take care not to create a writable
                         * mapping for executable code.
                         */
                        prot = PAGE_KERNEL_RO;
                        break;

                case EFI_ACPI_RECLAIM_MEMORY:
                        /*
                         * ACPI reclaim memory is used to pass firmware tables
                         * and other data that is intended for consumption by
                         * the OS only, which may decide it wants to reclaim
                         * that memory and use it for something else. We never
                         * do that, but we usually add it to the linear map
                         * anyway, in which case we should use the existing
                         * mapping.
                         */
                        if (memblock_is_map_memory(phys))
                                return (void __iomem *)__phys_to_virt(phys);
                        fallthrough;

                default:
                        if (region->attribute & EFI_MEMORY_WB)
                                prot = PAGE_KERNEL;
                        else if (region->attribute & EFI_MEMORY_WC)
                                prot = __pgprot(PROT_NORMAL_NC);
                        else if (region->attribute & EFI_MEMORY_WT)
                                prot = __acpi_get_writethrough_mem_attribute();
                }
        }

        return ioremap_prot(phys, size, pgprot_val(prot));
}

/*
 * Claim Synchronous External Aborts as a firmware first notification.
 *
 * Used by KVM and the arch do_sea handler.
 * @regs may be NULL when called from process context.
 */
int apei_claim_sea(struct pt_regs *regs)
{
        int err = -ENOENT;
        bool return_to_irqs_enabled;
        unsigned long current_flags;

        if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
                return err;

        current_flags = local_daif_save_flags();

        /* current_flags isn't useful here as daif doesn't tell us about pNMI */
        return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());
        if (regs)
                return_to_irqs_enabled = interrupts_enabled(regs);

        /*
         * SEA can interrupt SError, mask it and describe this as an NMI so
         * that APEI defers the handling.
         */
        local_daif_restore(DAIF_ERRCTX);
        nmi_enter();
        err = ghes_notify_sea();
        nmi_exit();

        /*
         * APEI NMI-like notifications are deferred to irq_work. Unless
         * we interrupted irqs-masked code, we can do that now.
         */
        if (!err) {
                if (return_to_irqs_enabled) {
                        local_daif_restore(DAIF_PROCCTX_NOIRQ);
                        __irq_enter();
                        irq_work_run();
                        __irq_exit();
                } else {
                        pr_warn_ratelimited("APEI work queued but not completed");
                        err = -EINPROGRESS;
                }
        }

        local_daif_restore(current_flags);

        return err;
}
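
/*
 * This is the hook referred to in acpi_os_ioremap() above: regions reserved
 * here (e.g. ACPI table overrides passed via initramfs) are marked NOMAP so
 * they stay out of the linear map and are handed out as read-only mappings
 * instead.
 */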
void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
        memblock_mark_nomap(addr, size);
}