boot.c 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * boot.c - Architecture-Specific Low-Level ACPI Boot Support
  4. *
  5. * Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
  6. * Copyright (C) 2001 Jun Nakajima <[email protected]>
  7. */
  8. #define pr_fmt(fmt) "ACPI: " fmt
  9. #include <linux/init.h>
  10. #include <linux/acpi.h>
  11. #include <linux/acpi_pmtmr.h>
  12. #include <linux/efi.h>
  13. #include <linux/cpumask.h>
  14. #include <linux/export.h>
  15. #include <linux/dmi.h>
  16. #include <linux/irq.h>
  17. #include <linux/slab.h>
  18. #include <linux/memblock.h>
  19. #include <linux/ioport.h>
  20. #include <linux/pci.h>
  21. #include <linux/efi-bgrt.h>
  22. #include <linux/serial_core.h>
  23. #include <linux/pgtable.h>
  24. #include <asm/e820/api.h>
  25. #include <asm/irqdomain.h>
  26. #include <asm/pci_x86.h>
  27. #include <asm/io_apic.h>
  28. #include <asm/apic.h>
  29. #include <asm/io.h>
  30. #include <asm/mpspec.h>
  31. #include <asm/smp.h>
  32. #include <asm/i8259.h>
  33. #include <asm/setup.h>
  34. #include "sleep.h" /* To include x86_acpi_suspend_lowlevel */
  35. static int __initdata acpi_force = 0;
  36. int acpi_disabled;
  37. EXPORT_SYMBOL(acpi_disabled);
  38. #ifdef CONFIG_X86_64
  39. # include <asm/proto.h>
  40. #endif /* X86 */
  41. int acpi_noirq; /* skip ACPI IRQ initialization */
  42. static int acpi_nobgrt; /* skip ACPI BGRT */
  43. int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
  44. EXPORT_SYMBOL(acpi_pci_disabled);
  45. int acpi_lapic;
  46. int acpi_ioapic;
  47. int acpi_strict;
  48. int acpi_disable_cmcff;
  49. /* ACPI SCI override configuration */
  50. u8 acpi_sci_flags __initdata;
  51. u32 acpi_sci_override_gsi __initdata = INVALID_ACPI_IRQ;
  52. int acpi_skip_timer_override __initdata;
  53. int acpi_use_timer_override __initdata;
  54. int acpi_fix_pin2_polarity __initdata;
  55. #ifdef CONFIG_X86_LOCAL_APIC
  56. static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
  57. static bool acpi_support_online_capable;
  58. #endif
  59. #ifdef CONFIG_X86_64
  60. /* Physical address of the Multiprocessor Wakeup Structure mailbox */
  61. static u64 acpi_mp_wake_mailbox_paddr;
  62. /* Virtual address of the Multiprocessor Wakeup Structure mailbox */
  63. static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox;
  64. #endif
  65. #ifdef CONFIG_X86_IO_APIC
  66. /*
  67. * Locks related to IOAPIC hotplug
  68. * Hotplug side:
  69. * ->device_hotplug_lock
  70. * ->acpi_ioapic_lock
  71. * ->ioapic_lock
  72. * Interrupt mapping side:
  73. * ->acpi_ioapic_lock
  74. * ->ioapic_mutex
  75. * ->ioapic_lock
  76. */
  77. static DEFINE_MUTEX(acpi_ioapic_lock);
  78. #endif
  79. /* --------------------------------------------------------------------------
  80. Boot-time Configuration
  81. -------------------------------------------------------------------------- */
  82. /*
  83. * The default interrupt routing model is PIC (8259). This gets
  84. * overridden if IOAPICs are enumerated (below).
  85. */
  86. enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
  87. /*
  88. * ISA irqs by default are the first 16 gsis but can be
  89. * any gsi as specified by an interrupt source override.
  90. */
  91. static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = {
  92. 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  93. };
  94. /*
  95. * This is just a simple wrapper around early_memremap(),
  96. * with sanity checks for phys == 0 and size == 0.
  97. */
  98. void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
  99. {
  100. if (!phys || !size)
  101. return NULL;
  102. return early_memremap(phys, size);
  103. }
  104. void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
  105. {
  106. if (!map || !size)
  107. return;
  108. early_memunmap(map, size);
  109. }
  110. #ifdef CONFIG_X86_LOCAL_APIC
/*
 * Parse the MADT header: record the local APIC base address, honor the
 * PC-AT (8259) compatibility flag, and note whether the firmware is new
 * enough (ACPI >= 6.3) to define the "online capable" CPU flag.
 */
static int __init acpi_parse_madt(struct acpi_table_header *table)
{
	struct acpi_table_madt *madt = NULL;

	/* Without a local APIC the MADT is of no use. */
	if (!boot_cpu_has(X86_FEATURE_APIC))
		return -EINVAL;

	madt = (struct acpi_table_madt *)table;
	if (!madt) {
		pr_warn("Unable to map MADT\n");
		return -ENODEV;
	}

	/* A non-zero address overrides the architectural default. */
	if (madt->address) {
		acpi_lapic_addr = (u64) madt->address;

		pr_debug("Local APIC address 0x%08x\n", madt->address);
	}

	if (madt->flags & ACPI_MADT_PCAT_COMPAT)
		legacy_pic_pcat_compat();

	/* ACPI 6.3 and newer support the online capable bit. */
	if (acpi_gbl_FADT.header.revision > 6 ||
	    (acpi_gbl_FADT.header.revision == 6 &&
	     acpi_gbl_FADT.minor_revision >= 3))
		acpi_support_online_capable = true;

	default_acpi_madt_oem_check(madt->header.oem_id,
				    madt->header.oem_table_id);

	return 0;
}
/**
 * acpi_register_lapic - register a local apic and generates a logic cpu number
 * @id: local apic id to register
 * @acpiid: ACPI id to register
 * @enabled: this cpu is enabled or not
 *
 * Returns the logic cpu number which maps to the local apic
 */
static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
{
	unsigned int ver = 0;
	int cpu;

	/* APIC IDs beyond MAX_LOCAL_APIC cannot be represented. */
	if (id >= MAX_LOCAL_APIC) {
		pr_info("skipped apicid that is too big\n");
		return -EINVAL;
	}

	/* Disabled CPUs are only counted, never assigned a logical number. */
	if (!enabled) {
		++disabled_cpus;
		return -EINVAL;
	}

	/* The APIC version is only known once the boot CPU is identified. */
	if (boot_cpu_physical_apicid != -1U)
		ver = boot_cpu_apic_version;

	cpu = generic_processor_info(id, ver);
	if (cpu >= 0)
		early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;

	return cpu;
}
  163. static bool __init acpi_is_processor_usable(u32 lapic_flags)
  164. {
  165. if (lapic_flags & ACPI_MADT_ENABLED)
  166. return true;
  167. if (!acpi_support_online_capable ||
  168. (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
  169. return true;
  170. return false;
  171. }
/*
 * MADT: processor local x2APIC entry.  Registers the CPU when x2APIC
 * support is compiled in; otherwise the entry is ignored with a warning.
 */
static int __init
acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_local_x2apic *processor = NULL;
#ifdef CONFIG_X86_X2APIC
	u32 apic_id;
	u8 enabled;
#endif

	processor = (struct acpi_madt_local_x2apic *)header;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);

#ifdef CONFIG_X86_X2APIC
	apic_id = processor->local_apic_id;
	enabled = processor->lapic_flags & ACPI_MADT_ENABLED;

	/* Ignore invalid ID (0xffffffff marks an unusable entry) */
	if (apic_id == 0xffffffff)
		return 0;

	/* don't register processors that cannot be onlined */
	if (!acpi_is_processor_usable(processor->lapic_flags))
		return 0;

	/*
	 * We need to register disabled CPU as well to permit
	 * counting disabled CPUs. This allows us to size
	 * cpus_possible_map more accurately, to permit
	 * to not preallocating memory for all NR_CPUS
	 * when we use CPU hotplug.
	 */
	if (!apic->apic_id_valid(apic_id)) {
		/* Only warn for entries the firmware claims are enabled. */
		if (enabled)
			pr_warn("x2apic entry ignored\n");
		return 0;
	}

	acpi_register_lapic(apic_id, processor->uid, enabled);
#else
	pr_warn("x2apic entry ignored\n");
#endif
	return 0;
}
/*
 * MADT: processor local APIC (xAPIC) entry.  Registers the CPU unless
 * the entry is invalid or the processor cannot be onlined.
 */
static int __init
acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
{
	struct acpi_madt_local_apic *processor = NULL;

	processor = (struct acpi_madt_local_apic *)header;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);

	/* Ignore invalid ID (0xff marks an unusable xAPIC entry) */
	if (processor->id == 0xff)
		return 0;

	/* don't register processors that can not be onlined */
	if (!acpi_is_processor_usable(processor->lapic_flags))
		return 0;

	/*
	 * We need to register disabled CPU as well to permit
	 * counting disabled CPUs. This allows us to size
	 * cpus_possible_map more accurately, to permit
	 * to not preallocating memory for all NR_CPUS
	 * when we use CPU hotplug.
	 */
	acpi_register_lapic(processor->id,	/* APIC ID */
			    processor->processor_id, /* ACPI ID */
			    processor->lapic_flags & ACPI_MADT_ENABLED);

	return 0;
}
  237. static int __init
  238. acpi_parse_sapic(union acpi_subtable_headers *header, const unsigned long end)
  239. {
  240. struct acpi_madt_local_sapic *processor = NULL;
  241. processor = (struct acpi_madt_local_sapic *)header;
  242. if (BAD_MADT_ENTRY(processor, end))
  243. return -EINVAL;
  244. acpi_table_print_madt_entry(&header->common);
  245. acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */
  246. processor->processor_id, /* ACPI ID */
  247. processor->lapic_flags & ACPI_MADT_ENABLED);
  248. return 0;
  249. }
  250. static int __init
  251. acpi_parse_lapic_addr_ovr(union acpi_subtable_headers * header,
  252. const unsigned long end)
  253. {
  254. struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
  255. lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
  256. if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
  257. return -EINVAL;
  258. acpi_table_print_madt_entry(&header->common);
  259. acpi_lapic_addr = lapic_addr_ovr->address;
  260. return 0;
  261. }
  262. static int __init
  263. acpi_parse_x2apic_nmi(union acpi_subtable_headers *header,
  264. const unsigned long end)
  265. {
  266. struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL;
  267. x2apic_nmi = (struct acpi_madt_local_x2apic_nmi *)header;
  268. if (BAD_MADT_ENTRY(x2apic_nmi, end))
  269. return -EINVAL;
  270. acpi_table_print_madt_entry(&header->common);
  271. if (x2apic_nmi->lint != 1)
  272. pr_warn("NMI not connected to LINT 1!\n");
  273. return 0;
  274. }
  275. static int __init
  276. acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long end)
  277. {
  278. struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
  279. lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
  280. if (BAD_MADT_ENTRY(lapic_nmi, end))
  281. return -EINVAL;
  282. acpi_table_print_madt_entry(&header->common);
  283. if (lapic_nmi->lint != 1)
  284. pr_warn("NMI not connected to LINT 1!\n");
  285. return 0;
  286. }
  287. #ifdef CONFIG_X86_64
  288. static int acpi_wakeup_cpu(int apicid, unsigned long start_ip)
  289. {
  290. /*
  291. * Remap mailbox memory only for the first call to acpi_wakeup_cpu().
  292. *
  293. * Wakeup of secondary CPUs is fully serialized in the core code.
  294. * No need to protect acpi_mp_wake_mailbox from concurrent accesses.
  295. */
  296. if (!acpi_mp_wake_mailbox) {
  297. acpi_mp_wake_mailbox = memremap(acpi_mp_wake_mailbox_paddr,
  298. sizeof(*acpi_mp_wake_mailbox),
  299. MEMREMAP_WB);
  300. }
  301. /*
  302. * Mailbox memory is shared between the firmware and OS. Firmware will
  303. * listen on mailbox command address, and once it receives the wakeup
  304. * command, the CPU associated with the given apicid will be booted.
  305. *
  306. * The value of 'apic_id' and 'wakeup_vector' must be visible to the
  307. * firmware before the wakeup command is visible. smp_store_release()
  308. * ensures ordering and visibility.
  309. */
  310. acpi_mp_wake_mailbox->apic_id = apicid;
  311. acpi_mp_wake_mailbox->wakeup_vector = start_ip;
  312. smp_store_release(&acpi_mp_wake_mailbox->command,
  313. ACPI_MP_WAKE_COMMAND_WAKEUP);
  314. /*
  315. * Wait for the CPU to wake up.
  316. *
  317. * The CPU being woken up is essentially in a spin loop waiting to be
  318. * woken up. It should not take long for it wake up and acknowledge by
  319. * zeroing out ->command.
  320. *
  321. * ACPI specification doesn't provide any guidance on how long kernel
  322. * has to wait for a wake up acknowledgement. It also doesn't provide
  323. * a way to cancel a wake up request if it takes too long.
  324. *
  325. * In TDX environment, the VMM has control over how long it takes to
  326. * wake up secondary. It can postpone scheduling secondary vCPU
  327. * indefinitely. Giving up on wake up request and reporting error opens
  328. * possible attack vector for VMM: it can wake up a secondary CPU when
  329. * kernel doesn't expect it. Wait until positive result of the wake up
  330. * request.
  331. */
  332. while (READ_ONCE(acpi_mp_wake_mailbox->command))
  333. cpu_relax();
  334. return 0;
  335. }
  336. #endif /* CONFIG_X86_64 */
  337. #endif /* CONFIG_X86_LOCAL_APIC */
  338. #ifdef CONFIG_X86_IO_APIC
  339. #define MP_ISA_BUS 0
static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
					 u8 trigger, u32 gsi);

/*
 * Record an Interrupt Source Override for a legacy (ISA) IRQ and keep
 * the isa_irq_to_gsi[] translation table consistent with it.
 */
static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
					  u32 gsi)
{
	/*
	 * Check bus_irq boundary.
	 */
	if (bus_irq >= NR_IRQS_LEGACY) {
		pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
		return;
	}

	/*
	 * TBD: This check is for faulty timer entries, where the override
	 *      erroneously sets the trigger to level, resulting in a HUGE
	 *      increase of timer interrupts!
	 */
	if ((bus_irq == 0) && (trigger == 3))
		trigger = 1;

	/* If the mptable entry cannot be saved, leave the mapping untouched. */
	if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0)
		return;
	/*
	 * Reset default identity mapping if gsi is also an legacy IRQ,
	 * otherwise there will be more than one entry with the same GSI
	 * and acpi_isa_irq_to_gsi() may give wrong result.
	 */
	if (gsi < nr_legacy_irqs() && isa_irq_to_gsi[gsi] == gsi)
		isa_irq_to_gsi[gsi] = INVALID_ACPI_IRQ;
	isa_irq_to_gsi[bus_irq] = gsi;
}
/*
 * Mirror an ACPI-routed PCI interrupt into the MP-table (via
 * mp_save_irq()) so the saved table matches the routing ACPI set up.
 * Compiled out unless CONFIG_X86_MPPARSE is enabled.
 */
static void mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
			       int polarity)
{
#ifdef CONFIG_X86_MPPARSE
	struct mpc_intsrc mp_irq;
	struct pci_dev *pdev;
	unsigned char number;
	unsigned int devfn;
	int ioapic;
	u8 pin;

	/* Only meaningful once an IOAPIC is in use, and only for PCI devs. */
	if (!acpi_ioapic)
		return;
	if (!dev || !dev_is_pci(dev))
		return;

	pdev = to_pci_dev(dev);
	number = pdev->bus->number;
	devfn = pdev->devfn;
	pin = pdev->pin;
	/* print the entry should happen on mptable identically */
	mp_irq.type = MP_INTSRC;
	mp_irq.irqtype = mp_INT;
	/* MP-table flag encoding: trigger in bits 3:2, polarity in bits 1:0 */
	mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
			 (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
	mp_irq.srcbus = number;
	/* PCI srcbusirq: device number in bits 6:2, INT pin (0-3) in bits 1:0 */
	mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
	ioapic = mp_find_ioapic(gsi);
	mp_irq.dstapic = mpc_ioapic_id(ioapic);
	mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);

	mp_save_irq(&mp_irq);
#endif
}
/*
 * Save an ISA-bus interrupt routing entry into the MP-table after
 * translating the GSI into an (ioapic, pin) pair.
 * Returns 0 on success, negative when no IOAPIC handles the GSI.
 */
static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
					 u8 trigger, u32 gsi)
{
	struct mpc_intsrc mp_irq;
	int ioapic, pin;

	/* Convert 'gsi' to 'ioapic.pin'(INTIN#) */
	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0) {
		pr_warn("Failed to find ioapic for gsi : %u\n", gsi);
		return ioapic;
	}

	pin = mp_find_ioapic_pin(ioapic, gsi);

	mp_irq.type = MP_INTSRC;
	mp_irq.irqtype = mp_INT;
	/* MP-table flag layout: trigger in bits 3:2, polarity in bits 1:0 */
	mp_irq.irqflag = (trigger << 2) | polarity;
	mp_irq.srcbus = MP_ISA_BUS;
	mp_irq.srcbusirq = bus_irq;
	mp_irq.dstapic = mpc_ioapic_id(ioapic);
	mp_irq.dstirq = pin;

	mp_save_irq(&mp_irq);

	return 0;
}
  423. static int __init
  424. acpi_parse_ioapic(union acpi_subtable_headers * header, const unsigned long end)
  425. {
  426. struct acpi_madt_io_apic *ioapic = NULL;
  427. struct ioapic_domain_cfg cfg = {
  428. .type = IOAPIC_DOMAIN_DYNAMIC,
  429. .ops = &mp_ioapic_irqdomain_ops,
  430. };
  431. ioapic = (struct acpi_madt_io_apic *)header;
  432. if (BAD_MADT_ENTRY(ioapic, end))
  433. return -EINVAL;
  434. acpi_table_print_madt_entry(&header->common);
  435. /* Statically assign IRQ numbers for IOAPICs hosting legacy IRQs */
  436. if (ioapic->global_irq_base < nr_legacy_irqs())
  437. cfg.type = IOAPIC_DOMAIN_LEGACY;
  438. mp_register_ioapic(ioapic->id, ioapic->address, ioapic->global_irq_base,
  439. &cfg);
  440. return 0;
  441. }
  442. /*
  443. * Parse Interrupt Source Override for the ACPI SCI
  444. */
  445. static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, u32 gsi)
  446. {
  447. if (trigger == 0) /* compatible SCI trigger is level */
  448. trigger = 3;
  449. if (polarity == 0) /* compatible SCI polarity is low */
  450. polarity = 3;
  451. /* Command-line over-ride via acpi_sci= */
  452. if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
  453. trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
  454. if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
  455. polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
  456. if (bus_irq < NR_IRQS_LEGACY)
  457. mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
  458. else
  459. mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi);
  460. acpi_penalize_sci_irq(bus_irq, trigger, polarity);
  461. /*
  462. * stash over-ride to indicate we've been here
  463. * and for later update of acpi_gbl_FADT
  464. */
  465. acpi_sci_override_gsi = gsi;
  466. return;
  467. }
/*
 * MADT: Interrupt Source Override.  Routes the SCI override to
 * acpi_sci_ioapic_setup() and applies IRQ0 (timer) quirk handling
 * before recording the legacy-IRQ remapping.
 */
static int __init
acpi_parse_int_src_ovr(union acpi_subtable_headers * header,
		       const unsigned long end)
{
	struct acpi_madt_interrupt_override *intsrc = NULL;

	intsrc = (struct acpi_madt_interrupt_override *)header;

	if (BAD_MADT_ENTRY(intsrc, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);

	/* The SCI gets dedicated handling (acpi_sci= overrides, stashing). */
	if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
		acpi_sci_ioapic_setup(intsrc->source_irq,
				      intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
				      (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
				      intsrc->global_irq);
		return 0;
	}

	if (intsrc->source_irq == 0) {
		/* acpi_skip_timer_override: honor command-line quirk. */
		if (acpi_skip_timer_override) {
			pr_warn("BIOS IRQ0 override ignored.\n");
			return 0;
		}

		/* Quirk: some BIOSes report the wrong polarity for IRQ0->pin2. */
		if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
			&& (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
			intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
			pr_warn("BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
		}
	}

	mp_override_legacy_irq(intsrc->source_irq,
			       intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
			       (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
			       intsrc->global_irq);

	return 0;
}
/* MADT: NMI source entry.  Currently only validated and printed. */
static int __init
acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned long end)
{
	struct acpi_madt_nmi_source *nmi_src = NULL;

	nmi_src = (struct acpi_madt_nmi_source *)header;

	if (BAD_MADT_ENTRY(nmi_src, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);

	/* TBD: Support nmi_src entries? */

	return 0;
}
  512. #endif /* CONFIG_X86_IO_APIC */
  513. /*
  514. * acpi_pic_sci_set_trigger()
  515. *
  516. * use ELCR to set PIC-mode trigger type for SCI
  517. *
  518. * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
  519. * it may require Edge Trigger -- use "acpi_sci=edge"
  520. *
  521. * Port 0x4d0-4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
  522. * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
  523. * ELCR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
  524. * ELCR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
  525. */
/*
 * Program the 8259 ELCR so the SCI's trigger mode matches what ACPI
 * expects; see the block comment above for the register layout.
 * @trigger uses MPS INTI encoding: 1 = edge, 3 = level.
 */
void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
{
	unsigned int mask = 1 << irq;
	unsigned int old, new;

	/* Real old ELCR mask */
	old = inb(PIC_ELCR1) | (inb(PIC_ELCR2) << 8);

	/*
	 * If we use ACPI to set PCI IRQs, then we should clear ELCR
	 * since we will set it correctly as we enable the PCI irq
	 * routing.
	 */
	new = acpi_noirq ? old : 0;

	/*
	 * Update SCI information in the ELCR, it isn't in the PCI
	 * routing tables..
	 */
	switch (trigger) {
	case 1:	/* Edge - clear */
		new &= ~mask;
		break;
	case 3:	/* Level - set */
		new |= mask;
		break;
	}

	/* Avoid touching the hardware when nothing changes. */
	if (old == new)
		return;

	pr_warn("setting ELCR to %04x (from %04x)\n", new, old);
	outb(new, PIC_ELCR1);
	outb(new >> 8, PIC_ELCR2);
}
/*
 * Map a GSI to a Linux IRQ number.  In PIC mode the mapping is the
 * identity; otherwise the GSI is registered, with trigger/polarity
 * taken from an interrupt source override when one exists (defaults:
 * level-triggered, active-low).  Returns 0 on success and stores the
 * IRQ in @irqp; negative error otherwise.
 */
int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
{
	int rc, irq, trigger, polarity;

	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
		*irqp = gsi;
		return 0;
	}

	rc = acpi_get_override_irq(gsi, &trigger, &polarity);
	if (rc)
		return rc;

	/* Translate the 0/1 override encoding into ACPI constants. */
	trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
	polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
	irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
	if (irq < 0)
		return irq;

	*irqp = irq;
	return 0;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
  575. int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
  576. {
  577. if (isa_irq < nr_legacy_irqs() &&
  578. isa_irq_to_gsi[isa_irq] != INVALID_ACPI_IRQ) {
  579. *gsi = isa_irq_to_gsi[isa_irq];
  580. return 0;
  581. }
  582. return -1;
  583. }
  584. static int acpi_register_gsi_pic(struct device *dev, u32 gsi,
  585. int trigger, int polarity)
  586. {
  587. #ifdef CONFIG_PCI
  588. /*
  589. * Make sure all (legacy) PCI IRQs are set as level-triggered.
  590. */
  591. if (trigger == ACPI_LEVEL_SENSITIVE)
  592. elcr_set_level_irq(gsi);
  593. #endif
  594. return gsi;
  595. }
  596. #ifdef CONFIG_X86_LOCAL_APIC
/*
 * IOAPIC-mode GSI registration: allocate (or look up) the Linux IRQ for
 * @gsi via the IOAPIC irqdomain with the requested trigger/polarity,
 * and mirror the routing into the MP-table when requested.
 */
static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
				    int trigger, int polarity)
{
	int irq = gsi;
#ifdef CONFIG_X86_IO_APIC
	int node;
	struct irq_alloc_info info;

	node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
	/* Convert ACPI constants into the 0/1 encoding the irqdomain uses. */
	trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
	polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
	ioapic_set_alloc_attr(&info, node, trigger, polarity);

	mutex_lock(&acpi_ioapic_lock);
	irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
	/* Don't set up the ACPI SCI because it's already set up */
	if (irq >= 0 && enable_update_mptable && gsi != acpi_gbl_FADT.sci_interrupt)
		mp_config_acpi_gsi(dev, gsi, trigger, polarity);
	mutex_unlock(&acpi_ioapic_lock);
#endif

	return irq;
}
/*
 * Undo acpi_register_gsi_ioapic(): look up the IRQ currently mapped to
 * @gsi (without allocating a new one) and release the mapping, if any.
 */
static void acpi_unregister_gsi_ioapic(u32 gsi)
{
#ifdef CONFIG_X86_IO_APIC
	int irq;

	mutex_lock(&acpi_ioapic_lock);
	irq = mp_map_gsi_to_irq(gsi, 0, NULL);
	if (irq > 0)
		mp_unmap_irq(irq);
	mutex_unlock(&acpi_ioapic_lock);
#endif
}
  628. #endif
  629. int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
  630. int trigger, int polarity) = acpi_register_gsi_pic;
  631. void (*__acpi_unregister_gsi)(u32 gsi) = NULL;
  632. #ifdef CONFIG_ACPI_SLEEP
  633. int (*acpi_suspend_lowlevel)(void) = x86_acpi_suspend_lowlevel;
  634. #else
  635. int (*acpi_suspend_lowlevel)(void);
  636. #endif
/*
 * success: return IRQ number (>=0)
 * failure: return < 0
 */
int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
{
	/* Dispatch to the PIC or IOAPIC implementation selected at boot. */
	return __acpi_register_gsi(dev, gsi, trigger, polarity);
}
EXPORT_SYMBOL_GPL(acpi_register_gsi);
  646. void acpi_unregister_gsi(u32 gsi)
  647. {
  648. if (__acpi_unregister_gsi)
  649. __acpi_unregister_gsi(gsi);
  650. }
  651. EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
  652. #ifdef CONFIG_X86_LOCAL_APIC
/*
 * Switch the ACPI IRQ model from the default PIC to IOAPIC once
 * IOAPICs have been enumerated, installing the matching GSI hooks.
 */
static void __init acpi_set_irq_model_ioapic(void)
{
	acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
	__acpi_register_gsi = acpi_register_gsi_ioapic;
	__acpi_unregister_gsi = acpi_unregister_gsi_ioapic;
	acpi_ioapic = 1;
}
  660. #endif
  661. /*
  662. * ACPI based hotplug support for CPU
  663. */
  664. #ifdef CONFIG_ACPI_HOTPLUG_CPU
  665. #include <acpi/processor.h>
  666. static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
  667. {
  668. #ifdef CONFIG_ACPI_NUMA
  669. int nid;
  670. nid = acpi_get_node(handle);
  671. if (nid != NUMA_NO_NODE) {
  672. set_apicid_to_node(physid, nid);
  673. numa_set_node(cpu, nid);
  674. }
  675. #endif
  676. return 0;
  677. }
/*
 * Hot-add one CPU: register its local APIC, evaluate _PDC, bind it to
 * its NUMA node, and report the logical CPU number through @pcpu.
 * Returns 0 on success, negative error when registration fails.
 */
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
		 int *pcpu)
{
	int cpu;

	cpu = acpi_register_lapic(physid, acpi_id, ACPI_MADT_ENABLED);
	if (cpu < 0) {
		pr_info("Unable to map lapic to logical cpu number\n");
		return cpu;
	}

	acpi_processor_set_pdc(handle);
	acpi_map_cpu2node(handle, cpu, physid);

	*pcpu = cpu;
	return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);
  693. int acpi_unmap_cpu(int cpu)
  694. {
  695. #ifdef CONFIG_ACPI_NUMA
  696. set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
  697. #endif
  698. per_cpu(x86_cpu_to_apicid, cpu) = -1;
  699. set_cpu_present(cpu, false);
  700. num_processors--;
  701. return (0);
  702. }
  703. EXPORT_SYMBOL(acpi_unmap_cpu);
  704. #endif /* CONFIG_ACPI_HOTPLUG_CPU */
/*
 * Hot-add an IOAPIC.  The IOAPIC ID comes from acpi_get_ioapic_id();
 * when that fails, fall back to the device's _UID.  Returns the
 * mp_register_ioapic() result, or -ENOSYS when IOAPIC hotplug support
 * is not compiled in.
 */
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
{
	int ret = -ENOSYS;
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
	int ioapic_id;
	u64 addr;
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_DYNAMIC,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	ioapic_id = acpi_get_ioapic_id(handle, gsi_base, &addr);
	if (ioapic_id < 0) {
		unsigned long long uid;
		acpi_status status;

		/* No usable MADT-derived ID: try the device's _UID instead. */
		status = acpi_evaluate_integer(handle, METHOD_NAME__UID,
					       NULL, &uid);
		if (ACPI_FAILURE(status)) {
			acpi_handle_warn(handle, "failed to get IOAPIC ID.\n");
			return -EINVAL;
		}
		ioapic_id = (int)uid;
	}

	mutex_lock(&acpi_ioapic_lock);
	ret = mp_register_ioapic(ioapic_id, phys_addr, gsi_base, &cfg);
	mutex_unlock(&acpi_ioapic_lock);
#endif

	return ret;
}
EXPORT_SYMBOL(acpi_register_ioapic);
/*
 * acpi_unregister_ioapic - unregister the IOAPIC with the given GSI base
 * @handle:   ACPI handle of the IOAPIC device (unused here)
 * @gsi_base: GSI base associated with the IOAPIC
 *
 * Returns the result of mp_unregister_ioapic(), or -ENOSYS when IOAPIC
 * hotplug support is not compiled in.
 */
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
{
	int ret = -ENOSYS;
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
	mutex_lock(&acpi_ioapic_lock);
	ret = mp_unregister_ioapic(gsi_base);
	mutex_unlock(&acpi_ioapic_lock);
#endif

	return ret;
}
EXPORT_SYMBOL(acpi_unregister_ioapic);
/**
 * acpi_ioapic_registered - Check whether IOAPIC associated with @gsi_base
 *			    has been registered
 * @handle:   ACPI handle of the IOAPIC device
 * @gsi_base: GSI base associated with the IOAPIC
 *
 * Assume caller holds some type of lock to serialize acpi_ioapic_registered()
 * with acpi_register_ioapic()/acpi_unregister_ioapic().
 *
 * Return: the result of mp_ioapic_registered() (presumably nonzero when
 * registered — confirm against its definition), or 0 when IOAPIC hotplug
 * support is not compiled in.
 */
int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base)
{
	int ret = 0;
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
	mutex_lock(&acpi_ioapic_lock);
	ret = mp_ioapic_registered(gsi_base);
	mutex_unlock(&acpi_ioapic_lock);
#endif

	return ret;
}
  764. static int __init acpi_parse_sbf(struct acpi_table_header *table)
  765. {
  766. struct acpi_table_boot *sb = (struct acpi_table_boot *)table;
  767. sbf_port = sb->cmos_index; /* Save CMOS port */
  768. return 0;
  769. }
  770. #ifdef CONFIG_HPET_TIMER
  771. #include <asm/hpet.h>
/* Firmware resource describing the HPET MMIO range; inserted later by
 * hpet_insert_resource(). */
static struct resource *hpet_res __initdata;

/*
 * Handler for the HPET description table: record the HPET base address
 * and block id, working around known-broken BIOS-provided addresses,
 * and prepare the iomem resource describing the HPET register block.
 */
static int __init acpi_parse_hpet(struct acpi_table_header *table)
{
	struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;

	if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
		pr_warn("HPET timers must be located in memory.\n");
		return -1;
	}

	hpet_address = hpet_tbl->address.address;
	hpet_blockid = hpet_tbl->sequence;

	/*
	 * Some broken BIOSes advertise HPET at 0x0. We really do not
	 * want to allocate a resource there.
	 */
	if (!hpet_address) {
		pr_warn("HPET id: %#x base: %#lx is invalid\n", hpet_tbl->id, hpet_address);
		return 0;
	}
#ifdef CONFIG_X86_64
	/*
	 * Some even more broken BIOSes advertise HPET at
	 * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add
	 * some noise:
	 */
	if (hpet_address == 0xfed0000000000000UL) {
		if (!hpet_force_user) {
			pr_warn("HPET id: %#x base: 0xfed0000000000000 is bogus, try hpet=force on the kernel command line to fix it up to 0xfed00000.\n",
				hpet_tbl->id);
			hpet_address = 0;
			return 0;
		}
		pr_warn("HPET id: %#x base: 0xfed0000000000000 fixed up to 0xfed00000.\n",
			hpet_tbl->id);
		/* Shift the bogus high-word address down to 0xfed00000. */
		hpet_address >>= 32;
	}
#endif
	pr_info("HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet_address);

	/*
	 * Allocate and initialize the HPET firmware resource for adding into
	 * the resource tree during the lateinit timeframe.
	 */
#define HPET_RESOURCE_NAME_SIZE 9
	/* Name storage is carved out of the same allocation, right after
	 * the resource struct itself. */
	hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE,
				  SMP_CACHE_BYTES);
	if (!hpet_res)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);

	hpet_res->name = (void *)&hpet_res[1];
	hpet_res->flags = IORESOURCE_MEM;
	snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
		 hpet_tbl->sequence);

	hpet_res->start = hpet_address;
	hpet_res->end = hpet_address + (1 * 1024) - 1;

	return 0;
}
  827. /*
  828. * hpet_insert_resource inserts the HPET resources used into the resource
  829. * tree.
  830. */
  831. static __init int hpet_insert_resource(void)
  832. {
  833. if (!hpet_res)
  834. return 1;
  835. return insert_resource(&iomem_resource, hpet_res);
  836. }
  837. late_initcall(hpet_insert_resource);
  838. #else
  839. #define acpi_parse_hpet NULL
  840. #endif
/*
 * Handler for the FADT: derive the presence of legacy platform devices
 * (PNPBIOS, i8042, RTC, VGA) from the FADT boot flags and locate the
 * ACPI PM timer I/O port.
 */
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
	if (!(acpi_gbl_FADT.boot_flags & ACPI_FADT_LEGACY_DEVICES)) {
		pr_debug("no legacy devices present\n");
		x86_platform.legacy.devices.pnpbios = 0;
	}

	/* Only trust the 8042 flag on FADT revision 2+, and never override
	 * a platform that declared the i8042 absent outright. */
	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
	    !(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) &&
	    x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) {
		pr_debug("i8042 controller is absent\n");
		x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT;
	}

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
		pr_debug("not registering RTC platform device\n");
		x86_platform.legacy.rtc = 0;
	}

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_VGA) {
		pr_debug("probing for VGA not safe\n");
		x86_platform.legacy.no_vga = 1;
	}

#ifdef CONFIG_X86_PM_TIMER
	/* detect the location of the ACPI PM Timer */
	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
		/* FADT rev. 2 */
		if (acpi_gbl_FADT.xpm_timer_block.space_id !=
		    ACPI_ADR_SPACE_SYSTEM_IO)
			return 0;

		pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
		/*
		 * "X" fields are optional extensions to the original V1.0
		 * fields, so we must selectively expand V1.0 fields if the
		 * corresponding X field is zero.
		 */
		if (!pmtmr_ioport)
			pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
	} else {
		/* FADT rev. 1 */
		pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
	}
	if (pmtmr_ioport)
		pr_info("PM-Timer IO Port: %#x\n", pmtmr_ioport);
#endif
	return 0;
}
  885. #ifdef CONFIG_X86_LOCAL_APIC
/*
 * Parse LAPIC entries in MADT
 * returns 0 on success, < 0 on error
 */
static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
{
	int count;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return -ENODEV;

	/*
	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
				      acpi_parse_lapic_addr_ovr, 0);
	if (count < 0) {
		pr_err("Error parsing LAPIC address override entry\n");
		return count;
	}

	/* Map the (possibly overridden) LAPIC address. */
	register_lapic_address(acpi_lapic_addr);

	return count;
}
/*
 * Parse the processor entries in the MADT: SAPIC entries if present,
 * otherwise LAPIC and X2APIC entries in one combined pass, followed by
 * the LAPIC/X2APIC NMI entries.
 *
 * Returns 0 on success, -ENODEV when no usable entries were found,
 * or a negative parse error.
 */
static int __init acpi_parse_madt_lapic_entries(void)
{
	int count;
	int x2count = 0;
	int ret;
	struct acpi_subtable_proc madt_proc[2];

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return -ENODEV;

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
				      acpi_parse_sapic, MAX_LOCAL_APIC);

	/* No SAPIC entries: enumerate LAPIC and X2APIC entries together so
	 * both types are handled in their table order. */
	if (!count) {
		memset(madt_proc, 0, sizeof(madt_proc));
		madt_proc[0].id = ACPI_MADT_TYPE_LOCAL_APIC;
		madt_proc[0].handler = acpi_parse_lapic;
		madt_proc[1].id = ACPI_MADT_TYPE_LOCAL_X2APIC;
		madt_proc[1].handler = acpi_parse_x2apic;
		ret = acpi_table_parse_entries_array(ACPI_SIG_MADT,
				sizeof(struct acpi_table_madt),
				madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
		if (ret < 0) {
			pr_err("Error parsing LAPIC/X2APIC entries\n");
			return ret;
		}

		count = madt_proc[0].count;
		x2count = madt_proc[1].count;
	}
	if (!count && !x2count) {
		pr_err("No LAPIC entries present\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return -ENODEV;
	} else if (count < 0 || x2count < 0) {
		pr_err("Error parsing LAPIC entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count;
	}

	x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
					acpi_parse_x2apic_nmi, 0);
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
				      acpi_parse_lapic_nmi, 0);
	if (count < 0 || x2count < 0) {
		pr_err("Error parsing LAPIC NMI entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count;
	}
	return 0;
}
  954. #ifdef CONFIG_X86_64
/*
 * MADT handler for the multiprocessor wakeup entry: record the wakeup
 * mailbox address and install the mailbox-based CPU wakeup handler.
 * Only meaningful on SMP kernels.
 */
static int __init acpi_parse_mp_wake(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct acpi_madt_multiproc_wakeup *mp_wake;

	if (!IS_ENABLED(CONFIG_SMP))
		return -ENODEV;

	mp_wake = (struct acpi_madt_multiproc_wakeup *)header;
	if (BAD_MADT_ENTRY(mp_wake, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);

	acpi_mp_wake_mailbox_paddr = mp_wake->base_address;

	acpi_wake_cpu_handler_update(acpi_wakeup_cpu);

	return 0;
}
  969. #endif /* CONFIG_X86_64 */
  970. #endif /* CONFIG_X86_LOCAL_APIC */
  971. #ifdef CONFIG_X86_IO_APIC
/*
 * Set up identity mappings for the legacy ISA IRQs that are not already
 * claimed by an MADT interrupt source override or an existing IOAPIC
 * pin mapping.
 */
static void __init mp_config_acpi_legacy_irqs(void)
{
	int i;
	struct mpc_intsrc mp_irq;

#ifdef CONFIG_EISA
	/*
	 * Fabricate the legacy ISA bus (bus #31).
	 */
	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
#endif
	set_bit(MP_ISA_BUS, mp_bus_not_pci);
	pr_debug("Bus #%d is ISA (nIRQs: %d)\n", MP_ISA_BUS, nr_legacy_irqs());

	/*
	 * Use the default configuration for the IRQs 0-15. Unless
	 * overridden by (MADT) interrupt source override entries.
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		int ioapic, pin;
		unsigned int dstapic;
		int idx;
		u32 gsi;

		/* Locate the gsi that irq i maps to. */
		if (acpi_isa_irq_to_gsi(i, &gsi))
			continue;

		/*
		 * Locate the IOAPIC that manages the ISA IRQ.
		 */
		ioapic = mp_find_ioapic(gsi);
		if (ioapic < 0)
			continue;
		pin = mp_find_ioapic_pin(ioapic, gsi);
		dstapic = mpc_ioapic_id(ioapic);

		/* Scan existing entries; stop early if this IRQ or pin is taken. */
		for (idx = 0; idx < mp_irq_entries; idx++) {
			struct mpc_intsrc *irq = mp_irqs + idx;

			/* Do we already have a mapping for this ISA IRQ? */
			if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
				break;

			/* Do we already have a mapping for this IOAPIC pin */
			if (irq->dstapic == dstapic && irq->dstirq == pin)
				break;
		}

		if (idx != mp_irq_entries) {
			pr_debug("ACPI: IRQ%d used by override.\n", i);
			continue;	/* IRQ already used */
		}

		/* Record the identity mapping for this legacy IRQ. */
		mp_irq.type = MP_INTSRC;
		mp_irq.irqflag = 0;	/* Conforming */
		mp_irq.srcbus = MP_ISA_BUS;
		mp_irq.dstapic = dstapic;
		mp_irq.irqtype = mp_INT;
		mp_irq.srcbusirq = i;	/* Identity mapped */
		mp_irq.dstirq = pin;

		mp_save_irq(&mp_irq);
	}
}
/*
 * Parse IOAPIC related entries in MADT
 * returns 0 on success, < 0 on error
 */
static int __init acpi_parse_madt_ioapic_entries(void)
{
	int count;

	/*
	 * ACPI interpreter is required to complete interrupt setup,
	 * so if it is off, don't enumerate the io-apics with ACPI.
	 * If MPS is present, it will handle them,
	 * otherwise the system will stay in PIC mode
	 */
	if (acpi_disabled || acpi_noirq)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return -ENODEV;

	/*
	 * if "noapic" boot option, don't look for IO-APICs
	 */
	if (skip_ioapic_setup) {
		pr_info("Skipping IOAPIC probe due to 'noapic' option.\n");
		return -ENODEV;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
				      MAX_IO_APICS);
	if (!count) {
		pr_err("No IOAPIC entries present\n");
		return -ENODEV;
	} else if (count < 0) {
		pr_err("Error parsing IOAPIC entry\n");
		return count;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
				      acpi_parse_int_src_ovr, nr_irqs);
	if (count < 0) {
		pr_err("Error parsing interrupt source overrides entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count;
	}

	/*
	 * If BIOS did not supply an INT_SRC_OVR for the SCI
	 * pretend we got one so we can set the SCI flags.
	 * But ignore setting up SCI on hardware reduced platforms.
	 */
	if (acpi_sci_override_gsi == INVALID_ACPI_IRQ && !acpi_gbl_reduced_hardware)
		acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0,
				      acpi_gbl_FADT.sci_interrupt);

	/* Fill in identity legacy mappings where no override */
	mp_config_acpi_legacy_irqs();

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
				      acpi_parse_nmi_src, nr_irqs);
	if (count < 0) {
		pr_err("Error parsing NMI SRC entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count;
	}

	return 0;
}
  1086. #else
/* Stub when IOAPIC support is compiled out: always report failure so
 * the caller does not switch to the IOAPIC interrupt model. */
static inline int acpi_parse_madt_ioapic_entries(void)
{
	return -1;
}
  1091. #endif /* !CONFIG_X86_IO_APIC */
/*
 * Early MADT pass: only resolve the LAPIC address (plus any override)
 * so the local APIC can be mapped.  Full entry parsing happens later in
 * acpi_process_madt().
 */
static void __init early_acpi_process_madt(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int error;

	if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {

		/*
		 * Parse MADT LAPIC entries
		 */
		error = early_acpi_parse_madt_lapic_addr_ovr();
		if (!error) {
			acpi_lapic = 1;
			smp_found_config = 1;
		}
		if (error == -EINVAL) {
			/*
			 * Dell Precision Workstation 410, 610 come here.
			 */
			pr_err("Invalid BIOS MADT, disabling ACPI\n");
			disable_acpi();
		}
	}
#endif
}
/*
 * Full MADT pass: enumerate LAPIC/X2APIC entries, then IOAPIC entries,
 * switch to the IOAPIC interrupt model on success and set the global
 * acpi_lapic/acpi_ioapic/smp_found_config state accordingly.
 */
static void __init acpi_process_madt(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int error;

	if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {

		/*
		 * Parse MADT LAPIC entries
		 */
		error = acpi_parse_madt_lapic_entries();
		if (!error) {
			acpi_lapic = 1;

			/*
			 * Parse MADT IO-APIC entries
			 */
			mutex_lock(&acpi_ioapic_lock);
			error = acpi_parse_madt_ioapic_entries();
			mutex_unlock(&acpi_ioapic_lock);
			if (!error) {
				acpi_set_irq_model_ioapic();

				smp_found_config = 1;
			}

#ifdef CONFIG_X86_64
			/*
			 * Parse MADT MP Wake entry.
			 */
			acpi_table_parse_madt(ACPI_MADT_TYPE_MULTIPROC_WAKEUP,
					      acpi_parse_mp_wake, 1);
#endif
		}
		if (error == -EINVAL) {
			/*
			 * Dell Precision Workstation 410, 610 come here.
			 */
			pr_err("Invalid BIOS MADT, disabling ACPI\n");
			disable_acpi();
		}
	} else {
		/*
		 * ACPI found no MADT, and so ACPI wants UP PIC mode.
		 * In the event an MPS table was found, forget it.
		 * Boot with "acpi=off" to use MPS on such a system.
		 */
		if (smp_found_config) {
			pr_warn("No APIC-table, disabling MPS\n");
			smp_found_config = 0;
		}
	}

	/*
	 * ACPI supports both logical (e.g. Hyper-Threading) and physical
	 * processors, where MPS only supports physical.
	 */
	if (acpi_lapic && acpi_ioapic)
		pr_info("Using ACPI (MADT) for SMP configuration information\n");
	else if (acpi_lapic)
		pr_info("Using ACPI for processor (LAPIC) configuration information\n");
#endif
	return;
}
  1173. static int __init disable_acpi_irq(const struct dmi_system_id *d)
  1174. {
  1175. if (!acpi_force) {
  1176. pr_notice("%s detected: force use of acpi=noirq\n", d->ident);
  1177. acpi_noirq_set();
  1178. }
  1179. return 0;
  1180. }
  1181. static int __init disable_acpi_pci(const struct dmi_system_id *d)
  1182. {
  1183. if (!acpi_force) {
  1184. pr_notice("%s detected: force use of pci=noacpi\n", d->ident);
  1185. acpi_disable_pci();
  1186. }
  1187. return 0;
  1188. }
  1189. static int __init disable_acpi_xsdt(const struct dmi_system_id *d)
  1190. {
  1191. if (!acpi_force) {
  1192. pr_notice("%s detected: force use of acpi=rsdt\n", d->ident);
  1193. acpi_gbl_do_not_use_xsdt = TRUE;
  1194. } else {
  1195. pr_notice("Warning: DMI blacklist says broken, but acpi XSDT forced\n");
  1196. }
  1197. return 0;
  1198. }
  1199. static int __init dmi_disable_acpi(const struct dmi_system_id *d)
  1200. {
  1201. if (!acpi_force) {
  1202. pr_notice("%s detected: acpi off\n", d->ident);
  1203. disable_acpi();
  1204. } else {
  1205. pr_notice("Warning: DMI blacklist says broken, but acpi forced\n");
  1206. }
  1207. return 0;
  1208. }
  1209. /*
  1210. * Force ignoring BIOS IRQ0 override
  1211. */
  1212. static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
  1213. {
  1214. if (!acpi_skip_timer_override) {
  1215. pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
  1216. d->ident);
  1217. acpi_skip_timer_override = 1;
  1218. }
  1219. return 0;
  1220. }
/*
 * ACPI offers an alternative platform interface model that removes
 * ACPI hardware requirements for platforms that do not implement
 * the PC Architecture.
 *
 * We initialize the Hardware-reduced ACPI model here:
 */
void __init acpi_generic_reduced_hw_init(void)
{
	/*
	 * Override x86_init functions and bypass legacy PIC in
	 * hardware reduced ACPI mode.
	 */
	x86_init.timers.timer_init = x86_init_noop;
	x86_init.irqs.pre_vector_init = x86_init_noop;
	legacy_pic = &null_legacy_pic;
}
  1238. static void __init acpi_reduced_hw_init(void)
  1239. {
  1240. if (acpi_gbl_reduced_hardware)
  1241. x86_init.acpi.reduced_hw_early_init();
  1242. }
/*
 * If your system is blacklisted here, but you find that acpi=force
 * works for you, please contact [email protected]
 *
 * Matched early from acpi_boot_table_init(), before the ACPI tables
 * are parsed.
 */
static const struct dmi_system_id acpi_dmi_table[] __initconst = {
	/*
	 * Boxes that need ACPI disabled
	 */
	{
	 .callback = dmi_disable_acpi,
	 .ident = "IBM Thinkpad",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
		     },
	 },

	/*
	 * Boxes that need ACPI PCI IRQ routing disabled
	 */
	{
	 .callback = disable_acpi_irq,
	 .ident = "ASUS A7V",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
		     DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
		     /* newer BIOS, Revision 1011, does work */
		     DMI_MATCH(DMI_BIOS_VERSION,
			       "ASUS A7V ACPI BIOS Revision 1007"),
		     },
	 },
	{
	 /*
	  * Latest BIOS for IBM 600E (1.16) has bad pcinum
	  * for LPC bridge, which is needed for the PCI
	  * interrupt links to work. DSDT fix is in bug 5966.
	  * 2645, 2646 model numbers are shared with 600/600E/600X
	  */
	 .callback = disable_acpi_irq,
	 .ident = "IBM Thinkpad 600 Series 2645",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "2645"),
		     },
	 },
	{
	 .callback = disable_acpi_irq,
	 .ident = "IBM Thinkpad 600 Series 2646",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "2646"),
		     },
	 },
	/*
	 * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
	 */
	{			/* _BBN 0 bug */
	 .callback = disable_acpi_pci,
	 .ident = "ASUS PR-DLS",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		     DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
		     DMI_MATCH(DMI_BIOS_VERSION,
			       "ASUS PR-DLS ACPI BIOS Revision 1010"),
		     DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
		     },
	 },
	{
	 .callback = disable_acpi_pci,
	 .ident = "Acer TravelMate 36x Laptop",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
		     },
	 },
	/*
	 * Boxes that need ACPI XSDT use disabled due to corrupted tables
	 */
	{
	 .callback = disable_acpi_xsdt,
	 .ident = "Advantech DAC-BJ01",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "Bearlake CRB Board"),
		     DMI_MATCH(DMI_BIOS_VERSION, "V1.12"),
		     DMI_MATCH(DMI_BIOS_DATE, "02/01/2011"),
		     },
	 },
	{}
};
/* second table for DMI checks that should run after early-quirks;
 * matched from acpi_boot_init(). */
static const struct dmi_system_id acpi_dmi_table_late[] __initconst = {
	/*
	 * HP laptops which use a DSDT reporting as HP/SB400/10000,
	 * which includes some code which overrides all temperature
	 * trip points to 16C if the INTIN2 input of the I/O APIC
	 * is enabled. This input is incorrectly designated the
	 * ISA IRQ 0 via an interrupt source override even though
	 * it is wired to the output of the master 8259A and INTIN0
	 * is not connected at all. Force ignoring BIOS IRQ0
	 * override in that cases.
	 */
	{
	 .callback = dmi_ignore_irq0_timer_override,
	 .ident = "HP nx6115 laptop",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"),
		     },
	 },
	{
	 .callback = dmi_ignore_irq0_timer_override,
	 .ident = "HP NX6125 laptop",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6125"),
		     },
	 },
	{
	 .callback = dmi_ignore_irq0_timer_override,
	 .ident = "HP NX6325 laptop",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
		     },
	 },
	{
	 .callback = dmi_ignore_irq0_timer_override,
	 .ident = "HP 6715b laptop",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
		     },
	 },
	{
	 .callback = dmi_ignore_irq0_timer_override,
	 .ident = "FUJITSU SIEMENS",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
		     },
	 },
	{}
};
  1386. /*
  1387. * acpi_boot_table_init() and acpi_boot_init()
  1388. * called from setup_arch(), always.
  1389. * 1. checksums all tables
  1390. * 2. enumerates lapics
  1391. * 3. enumerates io-apics
  1392. *
  1393. * acpi_table_init() is separate to allow reading SRAT without
  1394. * other side effects.
  1395. *
  1396. * side effects of acpi_boot_init:
  1397. * acpi_lapic = 1 if LAPIC found
  1398. * acpi_ioapic = 1 if IOAPIC found
  1399. * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
  1400. * if acpi_blacklisted() acpi_disabled = 1;
  1401. * acpi_irq_model=...
  1402. * ...
  1403. */
/*
 * Run the early DMI blacklist and, unless ACPI was disabled, locate and
 * reserve the initial ACPI tables.  Called from setup_arch().
 */
void __init acpi_boot_table_init(void)
{
	dmi_check_system(acpi_dmi_table);

	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		return;

	/*
	 * Initialize the ACPI boot-time table parser.
	 */
	if (acpi_locate_initial_tables())
		disable_acpi();
	else
		acpi_reserve_initial_tables();
}
/*
 * Early ACPI boot: finish table init, check the blacklist, do the early
 * MADT pass and hardware-reduced init.  Returns 1 when ACPI is (or
 * becomes) disabled, 0 otherwise.
 */
int __init early_acpi_boot_init(void)
{
	if (acpi_disabled)
		return 1;

	acpi_table_init_complete();

	acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);

	/*
	 * blacklist may disable ACPI entirely
	 */
	if (acpi_blacklisted()) {
		if (acpi_force) {
			pr_warn("acpi=force override\n");
		} else {
			pr_warn("Disabling ACPI support\n");
			disable_acpi();
			return 1;
		}
	}

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	early_acpi_process_madt();

	/*
	 * Hardware-reduced ACPI mode initialization:
	 */
	acpi_reduced_hw_init();

	return 0;
}
/*
 * Main ACPI boot pass: late DMI quirks, FADT/MADT/HPET/BGRT parsing,
 * ACPI PCI init hookup and SPCR console setup.  Returns 1 when ACPI is
 * disabled, 0 otherwise.
 */
int __init acpi_boot_init(void)
{
	/* those are executed after early-quirks are executed */
	dmi_check_system(acpi_dmi_table_late);

	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		return 1;

	acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);

	/*
	 * set sci_int and PM timer address
	 */
	acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
	if (IS_ENABLED(CONFIG_ACPI_BGRT) && !acpi_nobgrt)
		acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);

	if (!acpi_noirq)
		x86_init.pci.init = pci_acpi_init;

	/* Do not enable ACPI SPCR console by default */
	acpi_parse_spcr(earlycon_acpi_spcr_enable, false);

	return 0;
}
  1475. static int __init parse_acpi(char *arg)
  1476. {
  1477. if (!arg)
  1478. return -EINVAL;
  1479. /* "acpi=off" disables both ACPI table parsing and interpreter */
  1480. if (strcmp(arg, "off") == 0) {
  1481. disable_acpi();
  1482. }
  1483. /* acpi=force to over-ride black-list */
  1484. else if (strcmp(arg, "force") == 0) {
  1485. acpi_force = 1;
  1486. acpi_disabled = 0;
  1487. }
  1488. /* acpi=strict disables out-of-spec workarounds */
  1489. else if (strcmp(arg, "strict") == 0) {
  1490. acpi_strict = 1;
  1491. }
  1492. /* acpi=rsdt use RSDT instead of XSDT */
  1493. else if (strcmp(arg, "rsdt") == 0) {
  1494. acpi_gbl_do_not_use_xsdt = TRUE;
  1495. }
  1496. /* "acpi=noirq" disables ACPI interrupt routing */
  1497. else if (strcmp(arg, "noirq") == 0) {
  1498. acpi_noirq_set();
  1499. }
  1500. /* "acpi=copy_dsdt" copies DSDT */
  1501. else if (strcmp(arg, "copy_dsdt") == 0) {
  1502. acpi_gbl_copy_dsdt_locally = 1;
  1503. }
  1504. /* "acpi=nocmcff" disables FF mode for corrected errors */
  1505. else if (strcmp(arg, "nocmcff") == 0) {
  1506. acpi_disable_cmcff = 1;
  1507. } else {
  1508. /* Core will printk when we return error. */
  1509. return -EINVAL;
  1510. }
  1511. return 0;
  1512. }
  1513. early_param("acpi", parse_acpi);
/* "bgrt_disable": skip parsing the ACPI BGRT (boot graphics) table.
 * The flag is checked in acpi_boot_init(). */
static int __init parse_acpi_bgrt(char *arg)
{
	acpi_nobgrt = true;
	return 0;
}
early_param("bgrt_disable", parse_acpi_bgrt);
  1520. /* FIXME: Using pci= for an ACPI parameter is a travesty. */
  1521. static int __init parse_pci(char *arg)
  1522. {
  1523. if (arg && strcmp(arg, "noacpi") == 0)
  1524. acpi_disable_pci();
  1525. return 0;
  1526. }
  1527. early_param("pci", parse_pci);
/*
 * Warn when ACPI interrupt setup is disabled but the MP-table fallback
 * is not compiled in.  Returns 1 in that problematic configuration,
 * 0 otherwise.
 */
int __init acpi_mps_check(void)
{
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
/* mptable code is not built-in*/
	if (acpi_disabled || acpi_noirq) {
		pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n");
		return 1;
	}
#endif
	return 0;
}
  1539. #ifdef CONFIG_X86_IO_APIC
/* "acpi_skip_timer_override": ignore the BIOS IRQ0 override entry. */
static int __init parse_acpi_skip_timer_override(char *arg)
{
	acpi_skip_timer_override = 1;
	return 0;
}
early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
/* "acpi_use_timer_override": honor the BIOS IRQ0 override entry. */
static int __init parse_acpi_use_timer_override(char *arg)
{
	acpi_use_timer_override = 1;
	return 0;
}
early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
  1552. #endif /* CONFIG_X86_IO_APIC */
/*
 * "acpi_sci=edge|level|high|low": override the SCI trigger mode or
 * polarity.  Each keyword replaces only its own field in
 * acpi_sci_flags, leaving the other field untouched.
 */
static int __init setup_acpi_sci(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "edge"))
		acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
			(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
	else if (!strcmp(s, "level"))
		acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
			(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
	else if (!strcmp(s, "high"))
		acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
			(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
	else if (!strcmp(s, "low"))
		acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
			(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
	else
		return -EINVAL;
	return 0;
}
early_param("acpi_sci", setup_acpi_sci);
/*
 * ACPI Global Lock acquire protocol (see the ACPI spec, FACS Global
 * Lock).  In the lock word, bit 0 is "pending" and bit 1 is "owned":
 *
 *   new = (old & ~0x3) + 2        always set the owned bit, and
 *         + ((old >> 1) & 0x1)    set pending if it was already owned.
 *
 * The cmpxchg() loop retries until the word is updated atomically.
 * Returns -1 when the lock was acquired ((new & 3) == 2: owned, not
 * pending) and 0 when pending was set and the caller must wait for the
 * firmware to release the lock.
 */
int __acpi_acquire_global_lock(unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return ((new & 0x3) < 3) ? -1 : 0;
}
/*
 * ACPI Global Lock release protocol: atomically clear both the owned
 * and pending bits.  Returns the old pending bit; when nonzero the
 * caller must signal the firmware (per the ACPI spec, by setting
 * GBL_RLS) that the lock has been released.
 */
int __acpi_release_global_lock(unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = old & ~0x3;
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return old & 0x1;
}
/*
 * Reserve a firmware memory range as ACPI NVS in the e820 map and dump
 * the updated table.
 */
void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	e820__range_add(addr, size, E820_TYPE_NVS);
	e820__update_table_print();
}
/* Default hook: stash the ACPI RSDP address in boot_params. */
void x86_default_set_root_pointer(u64 addr)
{
	boot_params.acpi_rsdp_addr = addr;
}
/* Default hook: return the ACPI RSDP address recorded in boot_params. */
u64 x86_default_get_root_pointer(void)
{
	return boot_params.acpi_rsdp_addr;
}