// SPDX-License-Identifier: GPL-2.0-only
/*
 * APEI Generic Hardware Error Source support
 *
 * Generic Hardware Error Source provides a way to report platform
 * hardware errors (such as those from the chipset). It works in the
 * so-called "Firmware First" mode: hardware errors are reported to
 * firmware first, and the firmware then reports them to Linux. This
 * way, the firmware can check non-standard hardware error registers
 * or non-standard hardware links to produce more detailed error
 * information for Linux.
 *
 * For more information about Generic Hardware Error Source, please
 * refer to ACPI Specification version 4.0, section 17.3.2.6
 *
 * Copyright 2010,2011 Intel Corp.
 * Author: Huang Ying <ying.huang@intel.com>
 */

#include <linux/arm_sdei.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/aer.h>
#include <linux/nmi.h>
#include <linux/sched/clock.h>
#include <linux/uuid.h>
#include <linux/ras.h>
#include <linux/task_work.h>

#include <acpi/actbl1.h>
#include <acpi/ghes.h>
#include <acpi/apei.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <ras/ras_event.h>

#include "apei-internal.h"

#define GHES_PFX	"GHES: "

#define GHES_ESTATUS_MAX_SIZE		65536
#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536

#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3

/* This is just an estimation for memory pool allocation */
#define GHES_ESTATUS_CACHE_AVG_SIZE	512

#define GHES_ESTATUS_CACHES_SIZE	4

#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
/* Prevent too many caches from being allocated because of RCU */
#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)

#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))

#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_node) + (estatus_len))
#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_node *)(estatus_node) + 1))

#define GHES_VENDOR_ENTRY_LEN(gdata_len)			\
	(sizeof(struct ghes_vendor_record_entry) + (gdata_len))
#define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry)		\
	((struct acpi_hest_generic_data *)			\
	 ((struct ghes_vendor_record_entry *)(vendor_entry) + 1))
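
/*
 * A ghes_estatus_node, ghes_estatus_cache or ghes_vendor_record_entry
 * header is allocated together with its payload in a single pool
 * allocation: the raw data is stored immediately after the header.
 * That layout is why the *_FROM_*() macros above simply step over the
 * header with pointer-plus-one arithmetic.
 */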

/*
 * NMI-like notifications vary by architecture. Before the compiler can
 * prune unused static functions, it needs a value for these enums.
 */
#ifndef CONFIG_ARM_SDE_INTERFACE
#define FIX_APEI_GHES_SDEI_NORMAL	__end_of_fixed_addresses
#define FIX_APEI_GHES_SDEI_CRITICAL	__end_of_fixed_addresses
#endif

static inline bool is_hest_type_generic_v2(struct ghes *ghes)
{
	return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
}

/*
 * This driver isn't really modular, however for the time being,
 * continuing to use module_param is the easiest way to remain
 * compatible with existing boot arg use cases.
 */
bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);

/*
 * All error sources notified with HED (Hardware Error Device) share a
 * single notifier callback, so they need to be linked and checked one
 * by one. This holds true for NMI too.
 *
 * RCU is used for these lists, so ghes_list_mutex is only used for
 * list changing, not for traversing.
 */
static LIST_HEAD(ghes_hed);
static DEFINE_MUTEX(ghes_list_mutex);

/*
 * Because the memory area used to transfer hardware error information
 * from BIOS to Linux can be determined only in an NMI, IRQ or timer
 * handler, and general ioremap cannot be used in atomic context, the
 * fixmap is used instead.
 *
 * This spinlock is used to prevent the fixmap entry from being used
 * simultaneously.
 */
static DEFINE_SPINLOCK(ghes_notify_lock_irq);

struct ghes_vendor_record_entry {
	struct work_struct work;
	int error_severity;
	char vendor_record[];
};

static struct gen_pool *ghes_estatus_pool;
static unsigned long ghes_estatus_pool_size_request;

static struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;

static int ghes_panic_timeout __read_mostly = 30;

static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
{
	phys_addr_t paddr;
	pgprot_t prot;

	paddr = PFN_PHYS(pfn);
	prot = arch_apei_get_mem_attribute(paddr);
	__set_fixmap(fixmap_idx, paddr, prot);

	return (void __iomem *) __fix_to_virt(fixmap_idx);
}

static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
{
	int _idx = virt_to_fix((unsigned long)vaddr);

	WARN_ON_ONCE(fixmap_idx != _idx);
	clear_fixmap(fixmap_idx);
}

int ghes_estatus_pool_init(unsigned int num_ghes)
{
	unsigned long addr, len;
	int rc;

	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!ghes_estatus_pool)
		return -ENOMEM;

	len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX;
	len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE);

	ghes_estatus_pool_size_request = PAGE_ALIGN(len);
	addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
	if (!addr)
		goto err_pool_alloc;

	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
	if (rc)
		goto err_pool_add;

	return 0;

err_pool_add:
	vfree((void *)addr);

err_pool_alloc:
	gen_pool_destroy(ghes_estatus_pool);

	return -ENOMEM;
}
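
/*
 * Pool sizing, worked through (assuming 4 KiB pages): with num_ghes == 2,
 * len = 512 * 6 + 2 * 65536 = 134144 bytes, which PAGE_ALIGN() rounds up
 * to 135168 bytes (33 pages). The pool has to be sized up front because
 * NMI-like handlers allocate from it and cannot fall back to the page
 * allocator.
 */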

static int map_gen_v2(struct ghes *ghes)
{
	return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
}

static void unmap_gen_v2(struct ghes *ghes)
{
	apei_unmap_generic_address(&ghes->generic_v2->read_ack_register);
}

static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
{
	int rc;
	u64 val = 0;

	rc = apei_read(&val, &gv2->read_ack_register);
	if (rc)
		return;

	val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
	val |= gv2->read_ack_write << gv2->read_ack_register.bit_offset;

	apei_write(val, &gv2->read_ack_register);
}
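
/*
 * Per the GHESv2 definition in the ACPI spec, the Read Ack register is
 * updated with a read-modify-write: only the bits set in
 * read_ack_preserve are kept from the current value, then the
 * read_ack_write pattern is OR-ed in. This tells the firmware that the
 * error status block has been consumed and may be reused.
 */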

static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
	struct ghes *ghes;
	unsigned int error_block_length;
	int rc;

	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
	if (!ghes)
		return ERR_PTR(-ENOMEM);

	ghes->generic = generic;
	if (is_hest_type_generic_v2(ghes)) {
		rc = map_gen_v2(ghes);
		if (rc)
			goto err_free;
	}

	rc = apei_map_generic_address(&generic->error_status_address);
	if (rc)
		goto err_unmap_read_ack_addr;
	error_block_length = generic->error_block_length;
	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
		pr_warn(FW_WARN GHES_PFX
			"Error status block length is too long: %u for "
			"generic hardware error source: %d.\n",
			error_block_length, generic->header.source_id);
		error_block_length = GHES_ESTATUS_MAX_SIZE;
	}
	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
	if (!ghes->estatus) {
		rc = -ENOMEM;
		goto err_unmap_status_addr;
	}

	return ghes;

err_unmap_status_addr:
	apei_unmap_generic_address(&generic->error_status_address);
err_unmap_read_ack_addr:
	if (is_hest_type_generic_v2(ghes))
		unmap_gen_v2(ghes);
err_free:
	kfree(ghes);
	return ERR_PTR(rc);
}

static void ghes_fini(struct ghes *ghes)
{
	kfree(ghes->estatus);
	apei_unmap_generic_address(&ghes->generic->error_status_address);
	if (is_hest_type_generic_v2(ghes))
		unmap_gen_v2(ghes);
}

static inline int ghes_severity(int severity)
{
	switch (severity) {
	case CPER_SEV_INFORMATIONAL:
		return GHES_SEV_NO;
	case CPER_SEV_CORRECTED:
		return GHES_SEV_CORRECTED;
	case CPER_SEV_RECOVERABLE:
		return GHES_SEV_RECOVERABLE;
	case CPER_SEV_FATAL:
		return GHES_SEV_PANIC;
	default:
		/* Unknown, go panic */
		return GHES_SEV_PANIC;
	}
}
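
/*
 * The GHES_SEV_* values are ordered by increasing severity (NO <
 * CORRECTED < RECOVERABLE < PANIC), which is what allows the plain
 * numeric comparisons such as "sev >= GHES_SEV_PANIC" used below.
 */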

static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
				  int from_phys,
				  enum fixed_addresses fixmap_idx)
{
	void __iomem *vaddr;
	u64 offset;
	u32 trunk;

	while (len > 0) {
		offset = paddr - (paddr & PAGE_MASK);
		vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
		trunk = PAGE_SIZE - offset;
		trunk = min(trunk, len);
		if (from_phys)
			memcpy_fromio(buffer, vaddr + offset, trunk);
		else
			memcpy_toio(vaddr + offset, buffer, trunk);
		len -= trunk;
		paddr += trunk;
		buffer += trunk;
		ghes_unmap(vaddr, fixmap_idx);
	}
}
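
/*
 * The copy is chunked page by page because only a single fixmap slot is
 * available per notification context. Worked example (assuming 4 KiB
 * pages): a 6 KiB block starting at physical address 0x10000800 is
 * copied as a 2 KiB chunk up to the page boundary, then a 4 KiB chunk
 * from the next page, with a map/unmap cycle around each chunk.
 */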

/* Check the top-level record header has an appropriate size. */
static int __ghes_check_estatus(struct ghes *ghes,
				struct acpi_hest_generic_status *estatus)
{
	u32 len = cper_estatus_len(estatus);

	if (len < sizeof(*estatus)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n");
		return -EIO;
	}

	if (len > ghes->generic->error_block_length) {
		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n");
		return -EIO;
	}

	if (cper_estatus_check_header(estatus)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n");
		return -EIO;
	}

	return 0;
}

/* Read the CPER block, returning its address, and header in estatus. */
static int __ghes_peek_estatus(struct ghes *ghes,
			       struct acpi_hest_generic_status *estatus,
			       u64 *buf_paddr, enum fixed_addresses fixmap_idx)
{
	struct acpi_hest_generic *g = ghes->generic;
	int rc;

	rc = apei_read(buf_paddr, &g->error_status_address);
	if (rc) {
		*buf_paddr = 0;
		pr_warn_ratelimited(FW_WARN GHES_PFX
			"Failed to read error status block address for hardware error source: %d.\n",
			g->header.source_id);
		return -EIO;
	}
	if (!*buf_paddr)
		return -ENOENT;

	ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1,
			      fixmap_idx);
	if (!estatus->block_status) {
		*buf_paddr = 0;
		return -ENOENT;
	}

	return 0;
}

static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus,
			       u64 buf_paddr, enum fixed_addresses fixmap_idx,
			       size_t buf_len)
{
	ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx);
	if (cper_estatus_check(estatus)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Failed to read error status block!\n");
		return -EIO;
	}

	return 0;
}

static int ghes_read_estatus(struct ghes *ghes,
			     struct acpi_hest_generic_status *estatus,
			     u64 *buf_paddr, enum fixed_addresses fixmap_idx)
{
	int rc;

	rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx);
	if (rc)
		return rc;

	rc = __ghes_check_estatus(ghes, estatus);
	if (rc)
		return rc;

	return __ghes_read_estatus(estatus, *buf_paddr, fixmap_idx,
				   cper_estatus_len(estatus));
}

static void ghes_clear_estatus(struct ghes *ghes,
			       struct acpi_hest_generic_status *estatus,
			       u64 buf_paddr, enum fixed_addresses fixmap_idx)
{
	estatus->block_status = 0;

	if (!buf_paddr)
		return;

	ghes_copy_tofrom_phys(estatus, buf_paddr,
			      sizeof(estatus->block_status), 0,
			      fixmap_idx);

	/*
	 * GHESv2 type HEST entries introduce support for error acknowledgment,
	 * so only acknowledge the error if this support is present.
	 */
	if (is_hest_type_generic_v2(ghes))
		ghes_ack_error(ghes->generic_v2);
}

/*
 * Called as task_work before returning to user-space.
 * Ensure any queued work has been done before we return to the context that
 * triggered the notification.
 */
static void ghes_kick_task_work(struct callback_head *head)
{
	struct acpi_hest_generic_status *estatus;
	struct ghes_estatus_node *estatus_node;
	u32 node_len;

	estatus_node = container_of(head, struct ghes_estatus_node, task_work);
	if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
		memory_failure_queue_kick(estatus_node->task_work_cpu);

	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
	node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
}

static bool ghes_do_memory_failure(u64 physical_addr, int flags)
{
	unsigned long pfn;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
		return false;

	pfn = PHYS_PFN(physical_addr);
	if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Invalid address in generic error data: %#llx\n",
				    physical_addr);
		return false;
	}

	memory_failure_queue(pfn, flags);
	return true;
}

static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
				       int sev)
{
	int flags = -1;
	int sec_sev = ghes_severity(gdata->error_severity);
	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);

	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
		return false;

	/* Only the following two cases can be handled properly for now. */
	if (sec_sev == GHES_SEV_CORRECTED &&
	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
		flags = MF_SOFT_OFFLINE;
	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
		flags = 0;

	if (flags != -1)
		return ghes_do_memory_failure(mem_err->physical_addr, flags);

	return false;
}

static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev)
{
	struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
	bool queued = false;
	int sec_sev, i;
	char *p;

	log_arm_hw_error(err);

	sec_sev = ghes_severity(gdata->error_severity);
	if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
		return false;

	p = (char *)(err + 1);
	for (i = 0; i < err->err_info_num; i++) {
		struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
		bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
		bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
		const char *error_type = "unknown error";

		/*
		 * The field (err_info->error_info & BIT(26)) is hard-coded
		 * to 1 in some old firmware for the HiSilicon Kunpeng920.
		 * We assume that firmware won't mix corrected errors into
		 * an uncorrected section, so corrected errors are not
		 * filtered out here.
		 */
		if (is_cache && has_pa) {
			queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
			p += err_info->length;
			continue;
		}

		if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
			error_type = cper_proc_error_type_strs[err_info->type];

		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Unhandled processor error type: %s\n",
				    error_type);
		p += err_info->length;
	}

	return queued;
}

/*
 * PCIe AER errors need to be sent to the AER driver for reporting and
 * recovery. The GHES severities map to the following AER severities and
 * require the following handling:
 *
 * GHES_SEV_CORRECTED -> AER_CORRECTABLE
 *	These need to be reported by the AER driver but no recovery is
 *	necessary.
 * GHES_SEV_RECOVERABLE -> AER_NONFATAL
 * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
 *	These both need to be reported and recovered from by the AER driver.
 * GHES_SEV_PANIC does not make it to this handling since the kernel must
 * panic.
 */
static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
{
#ifdef CONFIG_ACPI_APEI_PCIEAER
	struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);

	if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
	    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
		unsigned int devfn;
		int aer_severity;

		devfn = PCI_DEVFN(pcie_err->device_id.device,
				  pcie_err->device_id.function);
		aer_severity = cper_severity_to_aer(gdata->error_severity);

		/*
		 * If firmware reset the component to contain
		 * the error, we must reinitialize it before
		 * use, so treat it as a fatal AER error.
		 */
		if (gdata->flags & CPER_SEC_RESET)
			aer_severity = AER_FATAL;

		aer_recover_queue(pcie_err->device_id.segment,
				  pcie_err->device_id.bus,
				  devfn, aer_severity,
				  (struct aer_capability_regs *)
				  pcie_err->aer_info);
	}
#endif
}

static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list);

int ghes_register_vendor_record_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vendor_record_notify_list, nb);
}
EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier);

void ghes_unregister_vendor_record_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&vendor_record_notify_list, nb);
}
EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier);
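
/*
 * Usage sketch (hypothetical consumer code, not part of this file): a
 * driver interested in vendor-specific (non-standard) records registers
 * a notifier_block; the callback then runs in process context with the
 * GHES severity as the 'action' argument and the copied generic data
 * entry as 'data':
 *
 *	static int my_vendor_cb(struct notifier_block *nb,
 *				unsigned long sev, void *data)
 *	{
 *		struct acpi_hest_generic_data *gdata = data;
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_vendor_cb,
 *	};
 *
 *	ghes_register_vendor_record_notifier(&my_nb);
 */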

static void ghes_vendor_record_work_func(struct work_struct *work)
{
	struct ghes_vendor_record_entry *entry;
	struct acpi_hest_generic_data *gdata;
	u32 len;

	entry = container_of(work, struct ghes_vendor_record_entry, work);
	gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);

	blocking_notifier_call_chain(&vendor_record_notify_list,
				     entry->error_severity, gdata);

	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
	gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len);
}

static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
					  int sev)
{
	struct acpi_hest_generic_data *copied_gdata;
	struct ghes_vendor_record_entry *entry;
	u32 len;

	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
	entry = (void *)gen_pool_alloc(ghes_estatus_pool, len);
	if (!entry)
		return;

	copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
	memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata));
	entry->error_severity = sev;

	INIT_WORK(&entry->work, ghes_vendor_record_work_func);
	schedule_work(&entry->work);
}
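
/*
 * Vendor records are handed off to a workqueue because the blocking
 * notifier chain above may sleep, which is not allowed in the IRQ-like
 * contexts that can reach this code. The record is copied into pool
 * memory so it outlives the estatus buffer it came from.
 */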

static bool ghes_do_proc(struct ghes *ghes,
			 const struct acpi_hest_generic_status *estatus)
{
	int sev, sec_sev;
	struct acpi_hest_generic_data *gdata;
	guid_t *sec_type;
	const guid_t *fru_id = &guid_null;
	char *fru_text = "";
	bool queued = false;

	sev = ghes_severity(estatus->error_severity);
	apei_estatus_for_each_section(estatus, gdata) {
		sec_type = (guid_t *)gdata->section_type;
		sec_sev = ghes_severity(gdata->error_severity);
		if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
			fru_id = (guid_t *)gdata->fru_id;

		if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
			fru_text = gdata->fru_text;

		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
			struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);

			ghes_edac_report_mem_error(sev, mem_err);
			arch_apei_report_mem_error(sev, mem_err);
			queued = ghes_handle_memory_failure(gdata, sev);
		} else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
			ghes_handle_aer(gdata);
		} else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
			queued = ghes_handle_arm_hw_error(gdata, sev);
		} else {
			void *err = acpi_hest_get_payload(gdata);

			ghes_defer_non_standard_event(gdata, sev);
			log_non_standard_event(sec_type, fru_id, fru_text,
					       sec_sev, err,
					       gdata->error_data_length);
		}
	}

	return queued;
}

static void __ghes_print_estatus(const char *pfx,
				 const struct acpi_hest_generic *generic,
				 const struct acpi_hest_generic_status *estatus)
{
	static atomic_t seqno;
	unsigned int curr_seqno;
	char pfx_seq[64];

	if (pfx == NULL) {
		if (ghes_severity(estatus->error_severity) <=
		    GHES_SEV_CORRECTED)
			pfx = KERN_WARNING;
		else
			pfx = KERN_ERR;
	}
	curr_seqno = atomic_inc_return(&seqno);
	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
	printk("%sHardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx_seq, generic->header.source_id);
	cper_estatus_print(pfx_seq, estatus);
}

static int ghes_print_estatus(const char *pfx,
			      const struct acpi_hest_generic *generic,
			      const struct acpi_hest_generic_status *estatus)
{
	/* Not more than 2 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
	struct ratelimit_state *ratelimit;

	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
		ratelimit = &ratelimit_corrected;
	else
		ratelimit = &ratelimit_uncorrected;
	if (__ratelimit(ratelimit)) {
		__ghes_print_estatus(pfx, generic, estatus);
		return 1;
	}

	return 0;
}

/*
 * GHES error status reporting throttle, to report more kinds of
 * errors instead of just the most frequently occurring ones.
 */
static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
{
	u32 len;
	int i, cached = 0;
	unsigned long long now;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	len = cper_estatus_len(estatus);
	rcu_read_lock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL)
			continue;
		if (len != cache->estatus_len)
			continue;
		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
		if (memcmp(estatus, cache_estatus, len))
			continue;
		atomic_inc(&cache->count);
		now = sched_clock();
		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
			cached = 1;
		break;
	}
	rcu_read_unlock();

	return cached;
}

static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int alloced;
	u32 len, cache_len;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	len = cper_estatus_len(estatus);
	cache_len = GHES_ESTATUS_CACHE_LEN(len);
	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
	if (!cache) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
	memcpy(cache_estatus, estatus, len);
	cache->estatus_len = len;
	atomic_set(&cache->count, 0);
	cache->generic = generic;
	cache->time_in = sched_clock();

	return cache;
}

static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
{
	u32 len;

	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
	len = GHES_ESTATUS_CACHE_LEN(len);
	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
	atomic_dec(&ghes_estatus_cache_alloced);
}

static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
{
	struct ghes_estatus_cache *cache;

	cache = container_of(head, struct ghes_estatus_cache, rcu);
	ghes_estatus_cache_free(cache);
}

static void ghes_estatus_cache_add(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int i, slot = -1, count;
	unsigned long long now, duration, period, max_period = 0;
	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;

	new_cache = ghes_estatus_cache_alloc(generic, estatus);
	if (new_cache == NULL)
		return;
	rcu_read_lock();
	now = sched_clock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL) {
			slot = i;
			slot_cache = NULL;
			break;
		}
		duration = now - cache->time_in;
		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
			slot = i;
			slot_cache = cache;
			break;
		}
		count = atomic_read(&cache->count);
		period = duration;
		do_div(period, (count + 1));
		if (period > max_period) {
			max_period = period;
			slot = i;
			slot_cache = cache;
		}
	}
	/* new_cache must be put into array after its contents are written */
	smp_wmb();
	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
				  slot_cache, new_cache) == slot_cache) {
		if (slot_cache)
			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
	} else
		ghes_estatus_cache_free(new_cache);
	rcu_read_unlock();
}
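
/*
 * Slot selection above: prefer an empty slot, then any expired entry;
 * otherwise evict the entry with the longest average interval between
 * hits (duration / (count + 1)), i.e. the least frequently seen error,
 * so that the most frequent errors keep being suppressed.
 */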

static void __ghes_panic(struct ghes *ghes,
			 struct acpi_hest_generic_status *estatus,
			 u64 buf_paddr, enum fixed_addresses fixmap_idx)
{
	__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);

	ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);

	/* reboot to log the error! */
	if (!panic_timeout)
		panic_timeout = ghes_panic_timeout;
	panic("Fatal hardware error!");
}

static int ghes_proc(struct ghes *ghes)
{
	struct acpi_hest_generic_status *estatus = ghes->estatus;
	u64 buf_paddr;
	int rc;

	rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ);
	if (rc)
		goto out;

	if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC)
		__ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);

	if (!ghes_estatus_cached(estatus)) {
		if (ghes_print_estatus(NULL, ghes->generic, estatus))
			ghes_estatus_cache_add(ghes->generic, estatus);
	}
	ghes_do_proc(ghes, estatus);

out:
	ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);

	return rc;
}

static void ghes_add_timer(struct ghes *ghes)
{
	struct acpi_hest_generic *g = ghes->generic;
	unsigned long expire;

	if (!g->notify.poll_interval) {
		pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
			g->header.source_id);
		return;
	}
	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
	ghes->timer.expires = round_jiffies_relative(expire);
	add_timer(&ghes->timer);
}

static void ghes_poll_func(struct timer_list *t)
{
	struct ghes *ghes = from_timer(ghes, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
	ghes_proc(ghes);
	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
	if (!(ghes->flags & GHES_EXITING))
		ghes_add_timer(ghes);
}

static irqreturn_t ghes_irq_func(int irq, void *data)
{
	struct ghes *ghes = data;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
	rc = ghes_proc(ghes);
	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
	if (rc)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
			   void *data)
{
	struct ghes *ghes;
	unsigned long flags;
	int ret = NOTIFY_DONE;

	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
	rcu_read_lock();
	list_for_each_entry_rcu(ghes, &ghes_hed, list) {
		if (!ghes_proc(ghes))
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();
	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);

	return ret;
}

static struct notifier_block ghes_notifier_hed = {
	.notifier_call = ghes_notify_hed,
};

/*
 * Handlers for CPER records may not be NMI safe. For example,
 * memory_failure_queue() takes spinlocks and calls schedule_work_on().
 * In any NMI-like handler, memory from ghes_estatus_pool is used to save
 * estatus, and added to the ghes_estatus_llist. irq_work_queue() causes
 * ghes_proc_in_irq() to run in IRQ context where each estatus in
 * ghes_estatus_llist is processed.
 *
 * Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache
 * to suppress frequent messages.
 */
static struct llist_head ghes_estatus_llist;
static struct irq_work ghes_proc_irq_work;

static void ghes_proc_in_irq(struct irq_work *irq_work)
{
	struct llist_node *llnode, *next;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;
	bool task_work_pending;
	u32 len, node_len;
	int ret;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * The llist is in reverse time order; restore the original
	 * order before processing.
	 */
	llnode = llist_reverse_order(llnode);
	while (llnode) {
		next = llnode->next;
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = cper_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
		if (!ghes_estatus_cached(estatus)) {
			generic = estatus_node->generic;
			if (ghes_print_estatus(NULL, generic, estatus))
				ghes_estatus_cache_add(generic, estatus);
		}

		if (task_work_pending && current->mm) {
			estatus_node->task_work.func = ghes_kick_task_work;
			estatus_node->task_work_cpu = smp_processor_id();
			ret = task_work_add(current, &estatus_node->task_work,
					    TWA_RESUME);
			if (ret)
				estatus_node->task_work.func = NULL;
		}

		if (!estatus_node->task_work.func)
			gen_pool_free(ghes_estatus_pool,
				      (unsigned long)estatus_node, node_len);

		llnode = next;
	}
}
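
/*
 * Note the estatus_node ownership handoff above: if task_work was queued
 * successfully, ghes_kick_task_work() frees the node later; in all other
 * cases (no pending work, kernel thread, or task_work_add() failure) it
 * is freed here.
 */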

static void ghes_print_queued_estatus(void)
{
	struct llist_node *llnode;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * The llist is in reverse time order; restore the original
	 * order before printing.
	 */
	llnode = llist_reverse_order(llnode);
	while (llnode) {
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		generic = estatus_node->generic;
		ghes_print_estatus(NULL, generic, estatus);
		llnode = llnode->next;
	}
}

static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
				       enum fixed_addresses fixmap_idx)
{
	struct acpi_hest_generic_status *estatus, tmp_header;
	struct ghes_estatus_node *estatus_node;
	u32 len, node_len;
	u64 buf_paddr;
	int sev, rc;

	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG))
		return -EOPNOTSUPP;

	rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx);
	if (rc) {
		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
		return rc;
	}

	rc = __ghes_check_estatus(ghes, &tmp_header);
	if (rc) {
		ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
		return rc;
	}

	len = cper_estatus_len(&tmp_header);
	node_len = GHES_ESTATUS_NODE_LEN(len);
	estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
	if (!estatus_node)
		return -ENOMEM;

	estatus_node->ghes = ghes;
	estatus_node->generic = ghes->generic;
	estatus_node->task_work.func = NULL;
	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);

	if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
		ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
		rc = -ENOENT;
		goto no_work;
	}

	sev = ghes_severity(estatus->error_severity);
	if (sev >= GHES_SEV_PANIC) {
		ghes_print_queued_estatus();
		__ghes_panic(ghes, estatus, buf_paddr, fixmap_idx);
	}

	ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);

	/* This error has been reported before, don't process it again. */
	if (ghes_estatus_cached(estatus))
		goto no_work;

	llist_add(&estatus_node->llnode, &ghes_estatus_llist);

	return rc;

no_work:
	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
		      node_len);

	return rc;
}

static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list,
				       enum fixed_addresses fixmap_idx)
{
	int ret = -ENOENT;
	struct ghes *ghes;

	rcu_read_lock();
	list_for_each_entry_rcu(ghes, rcu_list, list) {
		if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx))
			ret = 0;
	}
	rcu_read_unlock();

	if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret)
		irq_work_queue(&ghes_proc_irq_work);

	return ret;
}
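
/*
 * A return value of 0 above only means at least one source had an error
 * spooled; the actual processing happens later in ghes_proc_in_irq() via
 * the irq_work. The IS_ENABLED() check mirrors the one in
 * ghes_in_nmi_queue_one_entry(): without an NMI-safe cmpxchg the llist
 * and pool cannot safely be touched from this context.
 */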

#ifdef CONFIG_ACPI_APEI_SEA
static LIST_HEAD(ghes_sea);

/*
 * Return 0 only if one of the SEA error sources successfully reported an error
 * record sent from the firmware.
 */
int ghes_notify_sea(void)
{
	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
	int rv;

	raw_spin_lock(&ghes_notify_lock_sea);
	rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA);
	raw_spin_unlock(&ghes_notify_lock_sea);

	return rv;
}

static void ghes_sea_add(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	list_add_rcu(&ghes->list, &ghes_sea);
	mutex_unlock(&ghes_list_mutex);
}

static void ghes_sea_remove(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	list_del_rcu(&ghes->list);
	mutex_unlock(&ghes_list_mutex);
	synchronize_rcu();
}
#else /* CONFIG_ACPI_APEI_SEA */
static inline void ghes_sea_add(struct ghes *ghes) { }
static inline void ghes_sea_remove(struct ghes *ghes) { }
#endif /* CONFIG_ACPI_APEI_SEA */

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
/*
 * An NMI may be triggered on any CPU, so ghes_in_nmi is used to ensure
 * that only one reader runs at a time.
 */
static atomic_t ghes_in_nmi = ATOMIC_INIT(0);

static LIST_HEAD(ghes_nmi);

static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi);
	int ret = NMI_DONE;

	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
		return ret;

	raw_spin_lock(&ghes_notify_lock_nmi);
	if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI))
		ret = NMI_HANDLED;
	raw_spin_unlock(&ghes_notify_lock_nmi);
	atomic_dec(&ghes_in_nmi);

	return ret;
}

static void ghes_nmi_add(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	if (list_empty(&ghes_nmi))
		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
	list_add_rcu(&ghes->list, &ghes_nmi);
	mutex_unlock(&ghes_list_mutex);
}

static void ghes_nmi_remove(struct ghes *ghes)
{
	mutex_lock(&ghes_list_mutex);
	list_del_rcu(&ghes->list);
	if (list_empty(&ghes_nmi))
		unregister_nmi_handler(NMI_LOCAL, "ghes");
	mutex_unlock(&ghes_list_mutex);
	/*
	 * To synchronize with the NMI handler, the ghes can only be
	 * freed after the NMI handler finishes.
	 */
	synchronize_rcu();
}
#else /* CONFIG_HAVE_ACPI_APEI_NMI */
static inline void ghes_nmi_add(struct ghes *ghes) { }
static inline void ghes_nmi_remove(struct ghes *ghes) { }
#endif /* CONFIG_HAVE_ACPI_APEI_NMI */

static void ghes_nmi_init_cxt(void)
{
	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
}

static int __ghes_sdei_callback(struct ghes *ghes,
				enum fixed_addresses fixmap_idx)
{
	if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) {
		irq_work_queue(&ghes_proc_irq_work);

		return 0;
	}

	return -ENOENT;
}

static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs,
				     void *arg)
{
	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal);
	struct ghes *ghes = arg;
	int err;

	raw_spin_lock(&ghes_notify_lock_sdei_normal);
	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL);
	raw_spin_unlock(&ghes_notify_lock_sdei_normal);

	return err;
}

static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs,
				       void *arg)
{
	static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical);
	struct ghes *ghes = arg;
	int err;

	raw_spin_lock(&ghes_notify_lock_sdei_critical);
	err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL);
	raw_spin_unlock(&ghes_notify_lock_sdei_critical);

	return err;
}

static int apei_sdei_register_ghes(struct ghes *ghes)
{
	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
		return -EOPNOTSUPP;

	return sdei_register_ghes(ghes, ghes_sdei_normal_callback,
				  ghes_sdei_critical_callback);
}

static int apei_sdei_unregister_ghes(struct ghes *ghes)
{
	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
		return -EOPNOTSUPP;

	return sdei_unregister_ghes(ghes);
}

static int ghes_probe(struct platform_device *ghes_dev)
{
	struct acpi_hest_generic *generic;
	struct ghes *ghes = NULL;
	unsigned long flags;
	int rc = -EINVAL;

	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
	if (!generic->enabled)
		return -ENODEV;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
	case ACPI_HEST_NOTIFY_EXTERNAL:
	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_GSIV:
	case ACPI_HEST_NOTIFY_GPIO:
		break;

	case ACPI_HEST_NOTIFY_SEA:
		if (!IS_ENABLED(CONFIG_ACPI_APEI_SEA)) {
			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SEA is not supported\n",
				generic->header.source_id);
			rc = -ENOTSUPP;
			goto err;
		}
		break;
	case ACPI_HEST_NOTIFY_NMI:
		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
			pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
				generic->header.source_id);
			goto err;
		}
		break;
	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
		if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) {
			pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not supported!\n",
				generic->header.source_id);
			goto err;
		}
		break;
	case ACPI_HEST_NOTIFY_LOCAL:
		pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
			generic->header.source_id);
		goto err;
	default:
		pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
			generic->notify.type, generic->header.source_id);
		goto err;
	}

	rc = -EIO;
	if (generic->error_block_length <
	    sizeof(struct acpi_hest_generic_status)) {
		pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
			generic->error_block_length, generic->header.source_id);
		goto err;
	}
	ghes = ghes_new(generic);
	if (IS_ERR(ghes)) {
		rc = PTR_ERR(ghes);
		ghes = NULL;
		goto err;
	}

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		timer_setup(&ghes->timer, ghes_poll_func, 0);
		ghes_add_timer(ghes);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		/* External interrupt vector is GSI */
		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
		if (rc) {
			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err;
		}
		rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED,
				 "GHES IRQ", ghes);
		if (rc) {
			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err;
		}
		break;

	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_GSIV:
	case ACPI_HEST_NOTIFY_GPIO:
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_hed))
			register_acpi_hed_notifier(&ghes_notifier_hed);
		list_add_rcu(&ghes->list, &ghes_hed);
		mutex_unlock(&ghes_list_mutex);
		break;

	case ACPI_HEST_NOTIFY_SEA:
		ghes_sea_add(ghes);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		ghes_nmi_add(ghes);
		break;
	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
		rc = apei_sdei_register_ghes(ghes);
		if (rc)
			goto err;
		break;
	default:
		BUG();
	}

	platform_set_drvdata(ghes_dev, ghes);

	ghes_edac_register(ghes, &ghes_dev->dev);

	/* Handle any pending errors right away */
	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
	ghes_proc(ghes);
	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);

	return 0;

err:
	if (ghes) {
		ghes_fini(ghes);
		kfree(ghes);
	}

	return rc;
}

static int ghes_remove(struct platform_device *ghes_dev)
{
	int rc;
	struct ghes *ghes;
	struct acpi_hest_generic *generic;

	ghes = platform_get_drvdata(ghes_dev);
	generic = ghes->generic;

	ghes->flags |= GHES_EXITING;
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		del_timer_sync(&ghes->timer);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		free_irq(ghes->irq, ghes);
		break;

	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_GSIV:
	case ACPI_HEST_NOTIFY_GPIO:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_hed))
			unregister_acpi_hed_notifier(&ghes_notifier_hed);
		mutex_unlock(&ghes_list_mutex);
		synchronize_rcu();
		break;

	case ACPI_HEST_NOTIFY_SEA:
		ghes_sea_remove(ghes);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		ghes_nmi_remove(ghes);
		break;
	case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
		rc = apei_sdei_unregister_ghes(ghes);
		if (rc)
			return rc;
		break;
	default:
		BUG();
		break;
	}

	ghes_fini(ghes);

	ghes_edac_unregister(ghes);

	kfree(ghes);

	platform_set_drvdata(ghes_dev, NULL);

	return 0;
}

static struct platform_driver ghes_platform_driver = {
	.driver		= {
		.name	= "GHES",
	},
	.probe		= ghes_probe,
	.remove		= ghes_remove,
};
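
/*
 * The matching platform devices, one per enabled generic error source in
 * the HEST, are created by the APEI HEST parsing code, which is what
 * binds them to this "GHES" driver at boot.
 */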

void __init acpi_ghes_init(void)
{
	int rc;

	sdei_init();

	if (acpi_disabled)
		return;

	switch (hest_disable) {
	case HEST_NOT_FOUND:
		return;
	case HEST_DISABLED:
		pr_info(GHES_PFX "HEST is not enabled!\n");
		return;
	default:
		break;
	}

	if (ghes_disable) {
		pr_info(GHES_PFX "GHES is not enabled!\n");
		return;
	}

	ghes_nmi_init_cxt();

	rc = platform_driver_register(&ghes_platform_driver);
	if (rc)
		return;

	rc = apei_osc_setup();
	if (rc == 0 && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
	else if (rc == 0 && !osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
	else if (rc && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
	else
		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
}