acpi_processor.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * acpi_processor.c - ACPI processor enumeration support
 *
 * Copyright (C) 2001, 2002 Andy Grover <[email protected]>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
 * Copyright (C) 2004 Dominik Brodowski <[email protected]>
 * Copyright (C) 2004 Anil S Keshavamurthy <[email protected]>
 * Copyright (C) 2013, Intel Corporation
 *                     Rafael J. Wysocki <[email protected]>
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <acpi/processor.h>

#include <asm/cpu.h>

#include "internal.h"

DEFINE_PER_CPU(struct acpi_processor *, processors);
EXPORT_PER_CPU_SYMBOL(processors);

/* Errata Handling */
struct acpi_processor_errata errata __read_mostly;
EXPORT_SYMBOL_GPL(errata);

static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
	u8 value1 = 0;
	u8 value2 = 0;

	if (!dev)
		return -EINVAL;

	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */

	switch (dev->revision) {
	case 0:
		dev_dbg(&dev->dev, "Found PIIX4 A-step\n");
		break;
	case 1:
		dev_dbg(&dev->dev, "Found PIIX4 B-step\n");
		break;
	case 2:
		dev_dbg(&dev->dev, "Found PIIX4E\n");
		break;
	case 3:
		dev_dbg(&dev->dev, "Found PIIX4M\n");
		break;
	default:
		dev_dbg(&dev->dev, "Found unknown PIIX4\n");
		break;
	}

	switch (dev->revision) {
	case 0:		/* PIIX4 A-step */
	case 1:		/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies to only older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;
		fallthrough;

	case 2:		/* PIIX4E */
	case 3:		/* PIIX4M */
		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * BM-IDE
		 * ------
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * DMA activity.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			errata.piix4.bmisx = pci_resource_start(dev, 4);
			pci_dev_put(dev);
		}

		/*
		 * Type-F DMA
		 * ----------
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB_0,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			pci_read_config_byte(dev, 0x76, &value1);
			pci_read_config_byte(dev, 0x77, &value2);
			if ((value1 & 0x80) || (value2 & 0x80))
				errata.piix4.fdma = 1;
			pci_dev_put(dev);
		}

		break;
	}

	if (errata.piix4.bmisx)
		dev_dbg(&dev->dev, "Bus master activity detection (BM-IDE) erratum enabled\n");
	if (errata.piix4.fdma)
		dev_dbg(&dev->dev, "Type-F DMA livelock erratum (C3 disabled)\n");

	return 0;
}

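/*
 * Check for chipset-specific errata.  Currently only the PIIX4 ACPI
 * controller (function 3 of the 82371AB) is probed; if it is present,
 * the PIIX4 throttling and C3/DMA errata above are applied.
 */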
static int acpi_processor_errata(void)
{
	int result = 0;
	struct pci_dev *dev = NULL;

	/*
	 * PIIX4
	 */
	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
			     PCI_ANY_ID, NULL);
	if (dev) {
		result = acpi_processor_errata_piix4(dev);
		pci_dev_put(dev);
	}

	return result;
}

/* Initialization */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
int __weak acpi_map_cpu(acpi_handle handle,
			phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
	return -ENODEV;
}

int __weak acpi_unmap_cpu(int cpu)
{
	return -ENODEV;
}

int __weak arch_register_cpu(int cpu)
{
	return -ENODEV;
}

void __weak arch_unregister_cpu(int cpu) {}

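/*
 * Hot-add path: verify via _STA that the processor is physically present,
 * map its physical/ACPI IDs to a logical CPU ID with acpi_map_cpu() and
 * register the new CPU with the architecture code.  Idle/throttling setup
 * is deferred until the CPU comes online for the first time.
 */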
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
	unsigned long long sta;
	acpi_status status;
	int ret;

	if (invalid_phys_cpuid(pr->phys_id))
		return -ENODEV;

	status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
		return -ENODEV;

	cpu_maps_update_begin();
	cpus_write_lock();

	ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id);
	if (ret)
		goto out;

	ret = arch_register_cpu(pr->id);
	if (ret) {
		acpi_unmap_cpu(pr->id);
		goto out;
	}

	/*
	 * CPU got hot-added, but cpu_data is not initialized yet.  Set a flag
	 * to delay cpu_idle/throttling initialization and do it when the CPU
	 * gets online for the first time.
	 */
	pr_info("CPU%d has been hot-added\n", pr->id);
	pr->flags.need_hotplug_init = 1;

out:
	cpus_write_unlock();
	cpu_maps_update_done();
	return ret;
}
#else
static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
	return -ENODEV;
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */

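/*
 * Fill in the acpi_processor structure for @device: resolve the ACPI ID
 * (from the Processor statement, or from _UID for Device declarations),
 * map it to physical and logical CPU IDs, and record the P_BLK throttling
 * registers if a valid PBLK is declared.
 */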
static int acpi_processor_get_info(struct acpi_device *device)
{
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	struct acpi_processor *pr = acpi_driver_data(device);
	int device_declaration = 0;
	acpi_status status = AE_OK;
	static int cpu0_initialized;
	unsigned long long value;

	acpi_processor_errata();

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		dev_dbg(&device->dev, "Bus mastering arbitration control present\n");
	} else
		dev_dbg(&device->dev, "No bus mastering arbitration control\n");

	if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
		/* Declared with "Processor" statement; match ProcessorID */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor object (0x%x)\n",
				status);
			return -ENODEV;
		}

		pr->acpi_id = object.processor.proc_id;
	} else {
		/*
		 * Declared with "Device" statement; match _UID.
		 */
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
					       NULL, &value);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor _UID (0x%x)\n",
				status);
			return -ENODEV;
		}
		device_declaration = 1;
		pr->acpi_id = value;
	}

	if (acpi_duplicate_processor_id(pr->acpi_id)) {
		if (pr->acpi_id == 0xff)
			dev_info_once(&device->dev,
				"Entry not well-defined, consider updating BIOS\n");
		else
			dev_err(&device->dev,
				"Failed to get unique processor _UID (0x%x)\n",
				pr->acpi_id);
		return -ENODEV;
	}

	pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
				       pr->acpi_id);
	if (invalid_phys_cpuid(pr->phys_id))
		dev_dbg(&device->dev, "Failed to get CPU physical ID.\n");

	pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
	if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
		cpu0_initialized = 1;
		/*
		 * Handle UP system running SMP kernel, with no CPU
		 * entry in MADT
		 */
		if (invalid_logical_cpuid(pr->id) && (num_online_cpus() == 1))
			pr->id = 0;
	}

	/*
	 * Extra Processor objects may be enumerated on MP systems with
	 * less than the max # of CPUs.  They should be ignored _iff
	 * they are physically not present.
	 *
	 * NOTE: Even if the processor has a cpuid, it may not be present
	 * because cpuid <-> apicid mapping is persistent now.
	 */
	if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) {
		int ret = acpi_processor_hotadd_init(pr);

		if (ret)
			return ret;
	}

	/*
	 * On some boxes several processors use the same processor bus id.
	 * But they are located in different scope.  For example:
	 * \_SB.SCK0.CPU0
	 * \_SB.SCK1.CPU0
	 * Rename the processor device bus id.  And the new bus id will be
	 * generated as the following format:
	 * CPU+CPU ID.
	 */
	sprintf(acpi_device_bid(device), "CPU%X", pr->id);
	dev_dbg(&device->dev, "Processor [%d:%d]\n", pr->id, pr->acpi_id);

	if (!object.processor.pblk_address)
		dev_dbg(&device->dev, "No PBLK (NULL address)\n");
	else if (object.processor.pblk_length != 6)
		dev_err(&device->dev, "Invalid PBLK length [%d]\n",
			object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;
	}

	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field
	 * of /proc/cpuinfo
	 */
	status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
	if (ACPI_SUCCESS(status))
		arch_fix_phys_package_id(pr->id, value);

	return 0;
}

/*
 * Do not put anything in here which needs the core to be online.
 * For example MSR access or setting up things which check for cpuinfo_x86
 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
 * Such things have to be put in and set up by the processor driver's .probe().
 */
static DEFINE_PER_CPU(void *, processor_device_array);

static int acpi_processor_add(struct acpi_device *device,
			      const struct acpi_device_id *id)
{
	struct acpi_processor *pr;
	struct device *dev;
	int result = 0;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free_pr;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	result = acpi_processor_get_info(device);
	if (result) /* Processor is not physically present or unavailable */
		return 0;

	BUG_ON(pr->id >= nr_cpu_ids);

	/*
	 * Buggy BIOS check.
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		dev_warn(&device->dev,
			 "BIOS reported wrong ACPI id %d for the processor\n",
			 pr->id);
		/* Give up, but do not abort the namespace scan. */
		goto err;
	}
	/*
	 * processor_device_array is not cleared on errors to allow buggy BIOS
	 * checks.
	 */
	per_cpu(processor_device_array, pr->id) = device;
	per_cpu(processors, pr->id) = pr;

	dev = get_cpu_device(pr->id);
	if (!dev) {
		result = -ENODEV;
		goto err;
	}

	result = acpi_bind_one(dev, device);
	if (result)
		goto err;

	pr->dev = dev;

	/* Trigger the processor driver's .probe() if present. */
	if (device_attach(dev) >= 0)
		return 1;

	dev_err(dev, "Processor driver could not be attached\n");
	acpi_unbind_one(dev);

err:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	device->driver_data = NULL;
	per_cpu(processors, pr->id) = NULL;
err_free_pr:
	kfree(pr);
	return result;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Removal */
static void acpi_processor_remove(struct acpi_device *device)
{
	struct acpi_processor *pr;

	if (!device || !acpi_driver_data(device))
		return;

	pr = acpi_driver_data(device);
	if (pr->id >= nr_cpu_ids)
		goto out;

	/*
	 * The only reason why we ever get here is CPU hot-removal.  The CPU is
	 * already offline and the ACPI device removal locking prevents it from
	 * being put back online at this point.
	 *
	 * Unbind the driver from the processor device and detach it from the
	 * ACPI companion object.
	 */
	device_release_driver(pr->dev);
	acpi_unbind_one(pr->dev);

	/* Clean up. */
	per_cpu(processor_device_array, pr->id) = NULL;
	per_cpu(processors, pr->id) = NULL;

	cpu_maps_update_begin();
	cpus_write_lock();

	/* Remove the CPU. */
	arch_unregister_cpu(pr->id);
	acpi_unmap_cpu(pr->id);

	cpus_write_unlock();
	cpu_maps_update_done();

	try_offline_node(cpu_to_node(pr->id));

out:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	kfree(pr);
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */

#ifdef CONFIG_X86
static bool acpi_hwp_native_thermal_lvt_set;

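/*
 * Request native handling of the HWP thermal LVT interrupt via _OSC: bit 12
 * of the second capabilities DWORD is set and the returned capabilities are
 * checked for the platform's acknowledgment.  Once one processor object has
 * acked, the namespace walk is terminated via the flag above.
 */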
static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
							   u32 lvl,
							   void *context,
							   void **rv)
{
	u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
	u32 capbuf[2];
	struct acpi_osc_context osc_context = {
		.uuid_str = sb_uuid_str,
		.rev = 1,
		.cap.length = 8,
		.cap.pointer = capbuf,
	};

	if (acpi_hwp_native_thermal_lvt_set)
		return AE_CTRL_TERMINATE;

	capbuf[0] = 0x0000;
	capbuf[1] = 0x1000; /* set bit 12 */

	if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
		if (osc_context.ret.pointer && osc_context.ret.length > 1) {
			u32 *capbuf_ret = osc_context.ret.pointer;

			if (capbuf_ret[1] & 0x1000) {
				acpi_handle_info(handle,
					"_OSC native thermal LVT Acked\n");
				acpi_hwp_native_thermal_lvt_set = true;
			}
		}
		kfree(osc_context.ret.pointer);
	}

	return AE_OK;
}

void __init acpi_early_processor_osc(void)
{
	if (boot_cpu_has(X86_FEATURE_HWP)) {
		acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
				    ACPI_UINT32_MAX,
				    acpi_hwp_native_thermal_lvt_osc,
				    NULL, NULL, NULL);
		acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
				 acpi_hwp_native_thermal_lvt_osc,
				 NULL, NULL);
	}
}
#endif

/*
 * The following ACPI IDs are known to be suitable for representing as
 * processor devices.
 */
static const struct acpi_device_id processor_device_ids[] = {
	{ ACPI_PROCESSOR_OBJECT_HID, },
	{ ACPI_PROCESSOR_DEVICE_HID, },
	{ }
};

static struct acpi_scan_handler processor_handler = {
	.ids = processor_device_ids,
	.attach = acpi_processor_add,
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	.detach = acpi_processor_remove,
#endif
	.hotplug = {
		.enabled = true,
	},
};

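/*
 * Processor containers are simply claimed (attach returns 1); no
 * processor-specific setup is done for the container object itself, and
 * the processor devices below it are handled by processor_handler above.
 */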
static int acpi_processor_container_attach(struct acpi_device *dev,
					   const struct acpi_device_id *id)
{
	return 1;
}

static const struct acpi_device_id processor_container_ids[] = {
	{ ACPI_PROCESSOR_CONTAINER_HID, },
	{ }
};

static struct acpi_scan_handler processor_container_handler = {
	.ids = processor_container_ids,
	.attach = acpi_processor_container_attach,
};

/* The number of the unique processor IDs */
static int nr_unique_ids __initdata;

/* The number of the duplicate processor IDs */
static int nr_duplicate_ids;

/* Used to store the unique processor IDs */
static int unique_processor_ids[] __initdata = {
	[0 ... NR_CPUS - 1] = -1,
};

/* Used to store the duplicate processor IDs */
static int duplicate_processor_ids[] = {
	[0 ... NR_CPUS - 1] = -1,
};

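/*
 * Processor IDs are collected from the ACPI namespace at boot and
 * partitioned into the unique and duplicate sets above;
 * acpi_duplicate_processor_id() later rejects devices whose ID appears
 * more than once.
 */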
static void __init processor_validated_ids_update(int proc_id)
{
	int i;

	if (nr_unique_ids == NR_CPUS || nr_duplicate_ids == NR_CPUS)
		return;

	/*
	 * Firstly, compare the proc_id with duplicate IDs, if the proc_id is
	 * already in the IDs, do nothing.
	 */
	for (i = 0; i < nr_duplicate_ids; i++) {
		if (duplicate_processor_ids[i] == proc_id)
			return;
	}

	/*
	 * Secondly, compare the proc_id with unique IDs, if the proc_id is in
	 * the IDs, put it in the duplicate IDs.
	 */
	for (i = 0; i < nr_unique_ids; i++) {
		if (unique_processor_ids[i] == proc_id) {
			duplicate_processor_ids[nr_duplicate_ids] = proc_id;
			nr_duplicate_ids++;
			return;
		}
	}

	/*
	 * Lastly, the proc_id is a unique ID, put it in the unique IDs.
	 */
	unique_processor_ids[nr_unique_ids] = proc_id;
	nr_unique_ids++;
}

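/*
 * Namespace walk callback: extract the processor ID (proc_id for Processor
 * objects, _UID for processor Device objects) and feed it to
 * processor_validated_ids_update().  Malformed objects are reported but do
 * not abort the walk.
 */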
static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
						  u32 lvl,
						  void *context,
						  void **rv)
{
	acpi_status status;
	acpi_object_type acpi_type;
	unsigned long long uid;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

	status = acpi_get_type(handle, &acpi_type);
	if (ACPI_FAILURE(status))
		return status;

	switch (acpi_type) {
	case ACPI_TYPE_PROCESSOR:
		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status))
			goto err;
		uid = object.processor.proc_id;
		break;

	case ACPI_TYPE_DEVICE:
		status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
		if (ACPI_FAILURE(status))
			goto err;
		break;
	default:
		goto err;
	}

	processor_validated_ids_update(uid);
	return AE_OK;

err:
	/* Exit on error, but don't abort the namespace walk */
	acpi_handle_info(handle, "Invalid processor object\n");
	return AE_OK;
}

static void __init acpi_processor_check_duplicates(void)
{
	/* check the correctness for all processors in ACPI namespace */
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    acpi_processor_ids_walk,
			    NULL, NULL, NULL);
	acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
			 NULL, NULL);
}

bool acpi_duplicate_processor_id(int proc_id)
{
	int i;

	/*
	 * compare the proc_id with duplicate IDs, if the proc_id is already
	 * in the duplicate IDs, return true, otherwise, return false.
	 */
	for (i = 0; i < nr_duplicate_ids; i++) {
		if (duplicate_processor_ids[i] == proc_id)
			return true;
	}
	return false;
}

void __init acpi_processor_init(void)
{
	acpi_processor_check_duplicates();
	acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
	acpi_scan_add_handler(&processor_container_handler);
}

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
/**
 * acpi_processor_claim_cst_control - Request _CST control from the platform.
 */
bool acpi_processor_claim_cst_control(void)
{
	static bool cst_control_claimed;
	acpi_status status;

	if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
		return true;

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    acpi_gbl_FADT.cst_control, 8);
	if (ACPI_FAILURE(status)) {
		pr_warn("ACPI: Failed to claim processor _CST control\n");
		return false;
	}

	cst_control_claimed = true;
	return true;
}
EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);

/**
 * acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
 * @handle: ACPI handle of the processor object containing the _CST.
 * @cpu: The numeric ID of the target CPU.
 * @info: Object write the C-states information into.
 *
 * Extract the C-state information for the given CPU from the output of the _CST
 * control method under the corresponding ACPI processor object (or processor
 * device object) and populate @info with it.
 *
 * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
 * acpi_processor_ffh_cstate_probe() to verify them and update the
 * cpu_cstate_entry data for @cpu.
 */
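/*
 * For reference, the _CST output that the parser below expects has roughly
 * the following shape (ASL-like sketch, field names illustrative):
 *
 *	Package {
 *		Count,			// Integer: number of C-state entries
 *		Package {		// one 4-element package per C-state
 *			Register,	// Buffer: struct acpi_power_register
 *			Type,		// Integer: ACPI_STATE_C1/C2/C3
 *			Latency,	// Integer: worst-case latency
 *			Power		// Integer: average power
 *		},
 *		...
 *	}
 */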
int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
				struct acpi_processor_power *info)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;
	acpi_status status;
	u64 count;
	int last_index = 0;
	int i, ret = 0;

	status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_debug(handle, "No _CST\n");
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements. */
	if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
		acpi_handle_warn(handle, "Invalid _CST output\n");
		ret = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate the number of C-states. */
	if (count < 1 || count != cst->package.count - 1) {
		acpi_handle_warn(handle, "Inconsistent _CST data\n");
		ret = -EFAULT;
		goto end;
	}

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		/*
		 * If there is not enough space for all C-states, skip the
		 * excess ones and log a warning.
		 */
		if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
			acpi_handle_warn(handle,
					 "No room for more idle states (limit: %d)\n",
					 ACPI_PROCESSOR_MAX_POWER - 1);
			break;
		}

		memset(&cx, 0, sizeof(cx));

		element = &cst->package.elements[i];
		if (element->type != ACPI_TYPE_PACKAGE) {
			acpi_handle_info(handle, "_CST C%d type(%x) is not package, skip...\n",
					 i, element->type);
			continue;
		}

		if (element->package.count != 4) {
			acpi_handle_info(handle, "_CST C%d package count(%d) is not 4, skip...\n",
					 i, element->package.count);
			continue;
		}

		obj = &element->package.elements[0];

		if (obj->type != ACPI_TYPE_BUFFER) {
			acpi_handle_info(handle, "_CST C%d package element[0] type(%x) is not buffer, skip...\n",
					 i, obj->type);
			continue;
		}

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		obj = &element->package.elements[1];
		if (obj->type != ACPI_TYPE_INTEGER) {
			acpi_handle_info(handle, "_CST C[%d] package element[1] type(%x) is not integer, skip...\n",
					 i, obj->type);
			continue;
		}

		cx.type = obj->integer.value;
		/*
		 * There are known cases in which the _CST output does not
		 * contain C1, so if the type of the first state found is not
		 * C1, leave an empty slot for C1 to be filled in later.
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			last_index = 1;

		cx.address = reg->address;
		cx.index = last_index + 1;

		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
				/*
				 * In the majority of cases _CST describes C1 as
				 * a FIXED_HARDWARE C-state, but if the command
				 * line forbids using MWAIT, use CSTATE_HALT for
				 * C1 regardless.
				 */
				if (cx.type == ACPI_STATE_C1 &&
				    boot_option_idle_override == IDLE_NOMWAIT) {
					cx.entry_method = ACPI_CSTATE_HALT;
					snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
				} else {
					cx.entry_method = ACPI_CSTATE_FFH;
				}
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * In the special case of C1, FIXED_HARDWARE can
				 * be handled by executing the HLT instruction.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				acpi_handle_info(handle, "_CST C%d declares FIXED_HARDWARE C-state but not supported in hardware, skip...\n",
						 i);
				continue;
			}
		} else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
			cx.entry_method = ACPI_CSTATE_SYSTEMIO;
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		} else {
			acpi_handle_info(handle, "_CST C%d space_id(%x) neither FIXED_HARDWARE nor SYSTEM_IO, skip...\n",
					 i, reg->space_id);
			continue;
		}

		if (cx.type == ACPI_STATE_C1)
			cx.valid = 1;

		obj = &element->package.elements[2];
		if (obj->type != ACPI_TYPE_INTEGER) {
			acpi_handle_info(handle, "_CST C%d package element[2] type(%x) not integer, skip...\n",
					 i, obj->type);
			continue;
		}

		cx.latency = obj->integer.value;

		obj = &element->package.elements[3];
		if (obj->type != ACPI_TYPE_INTEGER) {
			acpi_handle_info(handle, "_CST C%d package element[3] type(%x) not integer, skip...\n",
					 i, obj->type);
			continue;
		}

		memcpy(&info->states[++last_index], &cx, sizeof(cx));
	}

	acpi_handle_info(handle, "Found %d idle states\n", last_index);

	info->count = last_index;

end:
	kfree(buffer.pointer);

	return ret;
}
EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */