erst.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * APEI Error Record Serialization Table support
  4. *
  5. * ERST is a way provided by APEI to save and retrieve hardware error
  6. * information to and from a persistent store.
  7. *
  8. * For more information about ERST, please refer to ACPI Specification
  9. * version 4.0, section 17.4.
  10. *
  11. * Copyright 2010 Intel Corp.
  12. * Author: Huang Ying <[email protected]>
  13. */
  14. #include <linux/kernel.h>
  15. #include <linux/module.h>
  16. #include <linux/init.h>
  17. #include <linux/delay.h>
  18. #include <linux/io.h>
  19. #include <linux/acpi.h>
  20. #include <linux/uaccess.h>
  21. #include <linux/cper.h>
  22. #include <linux/nmi.h>
  23. #include <linux/hardirq.h>
  24. #include <linux/pstore.h>
  25. #include <linux/vmalloc.h>
  26. #include <linux/mm.h> /* kvfree() */
  27. #include <acpi/apei.h>
  28. #include "apei-internal.h"
  29. #undef pr_fmt
  30. #define pr_fmt(fmt) "ERST: " fmt
/* ERST command status codes (ACPI spec v4.0, section 17.4) */
#define ERST_STATUS_SUCCESS			0x0
#define ERST_STATUS_NOT_ENOUGH_SPACE		0x1
#define ERST_STATUS_HARDWARE_NOT_AVAILABLE	0x2
#define ERST_STATUS_FAILED			0x3
#define ERST_STATUS_RECORD_STORE_EMPTY		0x4
#define ERST_STATUS_RECORD_NOT_FOUND		0x5

/* First serialization instruction entry, located right after the table header */
#define ERST_TAB_ENTRY(tab) \
	((struct acpi_whea_header *)((char *)(tab) + \
				     sizeof(struct acpi_table_erst)))

#define SPIN_UNIT		100	/* 100ns busy-wait interval */
/* Firmware should respond within 1 milliseconds */
#define FIRMWARE_TIMEOUT	(1 * NSEC_PER_MSEC)
#define FIRMWARE_MAX_STALL	50	/* 50us: cap for firmware-requested stalls */

/* Non-zero when ERST is unusable; set via the "erst_disable" boot parameter */
int erst_disable;
EXPORT_SYMBOL_GPL(erst_disable);

static struct acpi_table_erst *erst_tab;

/* ERST Error Log Address Range attributes */
#define ERST_RANGE_RESERVED	0x0001
#define ERST_RANGE_NVRAM	0x0002
#define ERST_RANGE_SLOW		0x0004

/*
 * ERST Error Log Address Range, used as buffer for reading/writing
 * error records.
 */
static struct erst_erange {
	u64 base;		/* physical base address of the range */
	u64 size;		/* size of the range in bytes */
	void __iomem *vaddr;	/* kernel mapping of the range */
	u32 attr;		/* ERST_RANGE_* attribute flags */
} erst_erange;

/*
 * Prevent ERST interpreter to run simultaneously, because the
 * corresponding firmware implementation may not work properly when
 * invoked simultaneously.
 *
 * It is used to provide exclusive accessing for ERST Error Log
 * Address Range too.
 */
static DEFINE_RAW_SPINLOCK(erst_lock);
  71. static inline int erst_errno(int command_status)
  72. {
  73. switch (command_status) {
  74. case ERST_STATUS_SUCCESS:
  75. return 0;
  76. case ERST_STATUS_HARDWARE_NOT_AVAILABLE:
  77. return -ENODEV;
  78. case ERST_STATUS_NOT_ENOUGH_SPACE:
  79. return -ENOSPC;
  80. case ERST_STATUS_RECORD_STORE_EMPTY:
  81. case ERST_STATUS_RECORD_NOT_FOUND:
  82. return -ENOENT;
  83. default:
  84. return -EINVAL;
  85. }
  86. }
/*
 * Account one busy-wait interval against the remaining firmware timeout.
 *
 * @t: remaining time budget in nanoseconds; decremented by @spin_unit
 * @spin_unit: length of one busy-wait interval in nanoseconds
 *
 * Returns 1 (after warning) once the budget is exhausted, 0 otherwise.
 * Delays for @spin_unit and pets the NMI watchdog so that a long
 * firmware wait does not trigger hard-lockup warnings.
 */
static int erst_timedout(u64 *t, u64 spin_unit)
{
	if ((s64)*t < spin_unit) {
		pr_warn(FW_WARN "Firmware does not respond in time.\n");
		return 1;
	}
	*t -= spin_unit;
	ndelay(spin_unit);
	touch_nmi_watchdog();
	return 0;
}
/* ACPI_ERST_LOAD_VAR1: load interpreter variable 1 from the entry's register */
static int erst_exec_load_var1(struct apei_exec_context *ctx,
			       struct acpi_whea_header *entry)
{
	return __apei_exec_read_register(entry, &ctx->var1);
}

/* ACPI_ERST_LOAD_VAR2: load interpreter variable 2 from the entry's register */
static int erst_exec_load_var2(struct apei_exec_context *ctx,
			       struct acpi_whea_header *entry)
{
	return __apei_exec_read_register(entry, &ctx->var2);
}

/* ACPI_ERST_STORE_VAR1: store interpreter variable 1 into the entry's register */
static int erst_exec_store_var1(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry)
{
	return __apei_exec_write_register(entry, ctx->var1);
}

/* ACPI_ERST_ADD: var1 += var2 */
static int erst_exec_add(struct apei_exec_context *ctx,
			 struct acpi_whea_header *entry)
{
	ctx->var1 += ctx->var2;
	return 0;
}

/* ACPI_ERST_SUBTRACT: var1 -= var2 */
static int erst_exec_subtract(struct apei_exec_context *ctx,
			      struct acpi_whea_header *entry)
{
	ctx->var1 -= ctx->var2;
	return 0;
}
  125. static int erst_exec_add_value(struct apei_exec_context *ctx,
  126. struct acpi_whea_header *entry)
  127. {
  128. int rc;
  129. u64 val;
  130. rc = __apei_exec_read_register(entry, &val);
  131. if (rc)
  132. return rc;
  133. val += ctx->value;
  134. rc = __apei_exec_write_register(entry, val);
  135. return rc;
  136. }
  137. static int erst_exec_subtract_value(struct apei_exec_context *ctx,
  138. struct acpi_whea_header *entry)
  139. {
  140. int rc;
  141. u64 val;
  142. rc = __apei_exec_read_register(entry, &val);
  143. if (rc)
  144. return rc;
  145. val -= ctx->value;
  146. rc = __apei_exec_write_register(entry, val);
  147. return rc;
  148. }
  149. static int erst_exec_stall(struct apei_exec_context *ctx,
  150. struct acpi_whea_header *entry)
  151. {
  152. u64 stall_time;
  153. if (ctx->value > FIRMWARE_MAX_STALL) {
  154. if (!in_nmi())
  155. pr_warn(FW_WARN
  156. "Too long stall time for stall instruction: 0x%llx.\n",
  157. ctx->value);
  158. stall_time = FIRMWARE_MAX_STALL;
  159. } else
  160. stall_time = ctx->value;
  161. udelay(stall_time);
  162. return 0;
  163. }
/*
 * ACPI_ERST_STALL_WHILE_TRUE: poll the entry's register until its value
 * differs from ctx->value, busy-waiting ctx->var1 microseconds (clamped
 * to FIRMWARE_MAX_STALL) between reads.  Gives up with -EIO once a
 * total of FIRMWARE_TIMEOUT has been spent waiting.
 */
static int erst_exec_stall_while_true(struct apei_exec_context *ctx,
				      struct acpi_whea_header *entry)
{
	int rc;
	u64 val;
	u64 timeout = FIRMWARE_TIMEOUT;
	u64 stall_time;

	if (ctx->var1 > FIRMWARE_MAX_STALL) {
		if (!in_nmi())
			pr_warn(FW_WARN
				"Too long stall time for stall while true instruction: 0x%llx.\n",
				ctx->var1);
		stall_time = FIRMWARE_MAX_STALL;
	} else
		stall_time = ctx->var1;

	for (;;) {
		rc = __apei_exec_read_register(entry, &val);
		if (rc)
			return rc;
		if (val != ctx->value)
			break;
		/* erst_timedout() performs the per-iteration delay */
		if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC))
			return -EIO;
	}
	return 0;
}
/*
 * ACPI_ERST_SKIP_NEXT_IF_TRUE: if the entry's register equals the
 * context value, advance the instruction pointer past the next
 * instruction (ip += 2 skips this instruction plus the following one).
 */
static int erst_exec_skip_next_instruction_if_true(
	struct apei_exec_context *ctx,
	struct acpi_whea_header *entry)
{
	int rc;
	u64 val;

	rc = __apei_exec_read_register(entry, &val);
	if (rc)
		return rc;
	if (val == ctx->value) {
		ctx->ip += 2;
		return APEI_EXEC_SET_IP;
	}

	return 0;
}

/* ACPI_ERST_GOTO: unconditional jump to the instruction index in ctx->value */
static int erst_exec_goto(struct apei_exec_context *ctx,
			  struct acpi_whea_header *entry)
{
	ctx->ip = ctx->value;
	return APEI_EXEC_SET_IP;
}

/* ACPI_ERST_SET_SRC_ADDRESS_BASE: latch the MOVE_DATA source base address */
static int erst_exec_set_src_address_base(struct apei_exec_context *ctx,
					  struct acpi_whea_header *entry)
{
	return __apei_exec_read_register(entry, &ctx->src_base);
}

/* ACPI_ERST_SET_DST_ADDRESS_BASE: latch the MOVE_DATA destination base address */
static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx,
					  struct acpi_whea_header *entry)
{
	return __apei_exec_read_register(entry, &ctx->dst_base);
}
/*
 * ACPI_ERST_MOVE_DATA: copy ctx->var2 bytes from src_base+offset to
 * dst_base+offset, where the offset comes from the entry's register.
 * Both physical ranges are temporarily ioremap()ed, so this instruction
 * cannot be executed in interrupt context.
 */
static int erst_exec_move_data(struct apei_exec_context *ctx,
			       struct acpi_whea_header *entry)
{
	int rc;
	u64 offset;
	void *src, *dst;

	/* ioremap does not work in interrupt context */
	if (in_interrupt()) {
		pr_warn("MOVE_DATA can not be used in interrupt context.\n");
		return -EBUSY;
	}

	rc = __apei_exec_read_register(entry, &offset);
	if (rc)
		return rc;

	src = ioremap(ctx->src_base + offset, ctx->var2);
	if (!src)
		return -ENOMEM;
	dst = ioremap(ctx->dst_base + offset, ctx->var2);
	if (!dst) {
		iounmap(src);
		return -ENOMEM;
	}

	/*
	 * NOTE(review): plain memmove() on ioremap()ed ranges — assumes
	 * the firmware-described regions behave like ordinary memory.
	 */
	memmove(dst, src, ctx->var2);

	iounmap(src);
	iounmap(dst);

	return 0;
}
/*
 * APEI interpreter instruction table: maps every ERST serialization
 * instruction code to its handler.  Entries flagged with
 * APEI_EXEC_INS_ACCESS_REGISTER operate on the register described by
 * the instruction's table entry.
 */
static struct apei_exec_ins_type erst_ins_type[] = {
	[ACPI_ERST_READ_REGISTER] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = apei_exec_read_register,
	},
	[ACPI_ERST_READ_REGISTER_VALUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = apei_exec_read_register_value,
	},
	[ACPI_ERST_WRITE_REGISTER] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = apei_exec_write_register,
	},
	[ACPI_ERST_WRITE_REGISTER_VALUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = apei_exec_write_register_value,
	},
	[ACPI_ERST_NOOP] = {
		.flags = 0,
		.run = apei_exec_noop,
	},
	[ACPI_ERST_LOAD_VAR1] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_load_var1,
	},
	[ACPI_ERST_LOAD_VAR2] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_load_var2,
	},
	[ACPI_ERST_STORE_VAR1] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_store_var1,
	},
	[ACPI_ERST_ADD] = {
		.flags = 0,
		.run = erst_exec_add,
	},
	[ACPI_ERST_SUBTRACT] = {
		.flags = 0,
		.run = erst_exec_subtract,
	},
	[ACPI_ERST_ADD_VALUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_add_value,
	},
	[ACPI_ERST_SUBTRACT_VALUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_subtract_value,
	},
	[ACPI_ERST_STALL] = {
		.flags = 0,
		.run = erst_exec_stall,
	},
	[ACPI_ERST_STALL_WHILE_TRUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_stall_while_true,
	},
	[ACPI_ERST_SKIP_NEXT_IF_TRUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_skip_next_instruction_if_true,
	},
	[ACPI_ERST_GOTO] = {
		.flags = 0,
		.run = erst_exec_goto,
	},
	[ACPI_ERST_SET_SRC_ADDRESS_BASE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_set_src_address_base,
	},
	[ACPI_ERST_SET_DST_ADDRESS_BASE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_set_dst_address_base,
	},
	[ACPI_ERST_MOVE_DATA] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_move_data,
	},
};
/* Initialize an APEI interpreter context with the ERST instruction table. */
static inline void erst_exec_ctx_init(struct apei_exec_context *ctx)
{
	apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type),
			   ERST_TAB_ENTRY(erst_tab), erst_tab->entries);
}
/*
 * Query base address, size and attributes of the Error Log Address
 * Range from the firmware and fill in @range.
 *
 * Returns 0 on success or a negative errno from the interpreter.
 */
static int erst_get_erange(struct erst_erange *range)
{
	struct apei_exec_context ctx;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE);
	if (rc)
		return rc;
	range->base = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH);
	if (rc)
		return rc;
	range->size = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES);
	if (rc)
		return rc;
	range->attr = apei_exec_ctx_get_output(&ctx);

	return 0;
}
/* Run GET_RECORD_COUNT; caller must hold erst_lock. */
static ssize_t __erst_get_record_count(void)
{
	struct apei_exec_context ctx;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT);
	if (rc)
		return rc;
	return apei_exec_ctx_get_output(&ctx);
}

/*
 * erst_get_record_count - Get the number of error records in
 * persistent storage.  Returns the count, or a negative errno
 * (-ENODEV when ERST is disabled).
 */
ssize_t erst_get_record_count(void)
{
	ssize_t count;
	unsigned long flags;

	if (erst_disable)
		return -ENODEV;

	raw_spin_lock_irqsave(&erst_lock, flags);
	count = __erst_get_record_count();
	raw_spin_unlock_irqrestore(&erst_lock, flags);

	return count;
}
EXPORT_SYMBOL_GPL(erst_get_record_count);
#define ERST_RECORD_ID_CACHE_SIZE_MIN	16
#define ERST_RECORD_ID_CACHE_SIZE_MAX	1024

/*
 * Cache of record IDs fetched from the firmware via GET_RECORD_ID,
 * so that callers can iterate record IDs with a simple index.
 */
struct erst_record_id_cache {
	struct mutex lock;	/* protects all fields below */
	u64 *entries;		/* cached record IDs */
	int len;		/* number of valid entries */
	int size;		/* allocated capacity of entries[] */
	int refcount;		/* active begin/end iteration sections */
};

static struct erst_record_id_cache erst_record_id_cache = {
	.lock = __MUTEX_INITIALIZER(erst_record_id_cache.lock),
	.refcount = 0,
};

/* Run GET_RECORD_ID; caller must hold erst_lock. */
static int __erst_get_next_record_id(u64 *record_id)
{
	struct apei_exec_context ctx;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID);
	if (rc)
		return rc;
	*record_id = apei_exec_ctx_get_output(&ctx);

	return 0;
}
/*
 * erst_get_record_id_begin - Start iterating record IDs.
 * @pos: iteration cursor, reset to 0; pass to erst_get_record_id_next().
 *
 * Takes a reference on the record ID cache so that it is not compacted
 * while the iteration is in progress; pair with erst_get_record_id_end().
 * Returns 0, -ENODEV when ERST is disabled, or -EINTR-style errors from
 * the interruptible mutex lock.
 */
int erst_get_record_id_begin(int *pos)
{
	int rc;

	if (erst_disable)
		return -ENODEV;

	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
	if (rc)
		return rc;
	erst_record_id_cache.refcount++;
	mutex_unlock(&erst_record_id_cache.lock);

	*pos = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(erst_get_record_id_begin);
/*
 * Fetch one more record ID from the firmware and append it to the cache.
 *
 * Returns 1 when a new ID was added, 0 when there is nothing more to add
 * (store empty, iteration wrapped around, or cache at its hard cap), or
 * a negative errno on failure.
 *
 * erst_record_id_cache.lock must be held by caller.
 */
static int __erst_record_id_cache_add_one(void)
{
	u64 id, prev_id, first_id;
	int i, rc;
	u64 *entries;
	unsigned long flags;

	id = prev_id = first_id = APEI_ERST_INVALID_RECORD_ID;
retry:
	raw_spin_lock_irqsave(&erst_lock, flags);
	rc = __erst_get_next_record_id(&id);
	raw_spin_unlock_irqrestore(&erst_lock, flags);
	if (rc == -ENOENT)
		return 0;
	if (rc)
		return rc;
	if (id == APEI_ERST_INVALID_RECORD_ID)
		return 0;
	/* can not skip current ID, or loop back to first ID */
	if (id == prev_id || id == first_id)
		return 0;
	if (first_id == APEI_ERST_INVALID_RECORD_ID)
		first_id = id;
	prev_id = id;

	entries = erst_record_id_cache.entries;
	for (i = 0; i < erst_record_id_cache.len; i++) {
		if (entries[i] == id)
			break;
	}
	/* record id already in cache, try next */
	if (i < erst_record_id_cache.len)
		goto retry;
	if (erst_record_id_cache.len >= erst_record_id_cache.size) {
		/* grow the cache geometrically, up to the hard cap */
		int new_size;
		u64 *new_entries;

		new_size = erst_record_id_cache.size * 2;
		new_size = clamp_val(new_size, ERST_RECORD_ID_CACHE_SIZE_MIN,
				     ERST_RECORD_ID_CACHE_SIZE_MAX);
		if (new_size <= erst_record_id_cache.size) {
			if (printk_ratelimit())
				pr_warn(FW_WARN "too many record IDs!\n");
			return 0;
		}
		new_entries = kvmalloc_array(new_size, sizeof(entries[0]),
					     GFP_KERNEL);
		if (!new_entries)
			return -ENOMEM;
		memcpy(new_entries, entries,
		       erst_record_id_cache.len * sizeof(entries[0]));
		kvfree(entries);
		erst_record_id_cache.entries = entries = new_entries;
		erst_record_id_cache.size = new_size;
	}
	entries[i] = id;
	erst_record_id_cache.len++;

	return 1;
}
/*
 * Get the record ID of an existing error record on the persistent
 * storage. If there is no error record on the persistent storage, the
 * returned record_id is APEI_ERST_INVALID_RECORD_ID.
 */
int erst_get_record_id_next(int *pos, u64 *record_id)
{
	int rc = 0;
	u64 *entries;

	if (erst_disable)
		return -ENODEV;

	/* must be enclosed by erst_get_record_id_begin/end */
	BUG_ON(!erst_record_id_cache.refcount);
	BUG_ON(*pos < 0 || *pos > erst_record_id_cache.len);

	mutex_lock(&erst_record_id_cache.lock);
	entries = erst_record_id_cache.entries;
	/* skip over IDs that were invalidated (cleared) since being cached */
	for (; *pos < erst_record_id_cache.len; (*pos)++)
		if (entries[*pos] != APEI_ERST_INVALID_RECORD_ID)
			break;
	/* found next record id in cache */
	if (*pos < erst_record_id_cache.len) {
		*record_id = entries[*pos];
		(*pos)++;
		goto out_unlock;
	}

	/* Try to add one more record ID to cache */
	rc = __erst_record_id_cache_add_one();
	if (rc < 0)
		goto out_unlock;
	/* successfully add one new ID */
	if (rc == 1) {
		*record_id = erst_record_id_cache.entries[*pos];
		(*pos)++;
		rc = 0;
	} else {
		/* iteration exhausted: report the invalid ID sentinel */
		*pos = -1;
		*record_id = APEI_ERST_INVALID_RECORD_ID;
	}
out_unlock:
	mutex_unlock(&erst_record_id_cache.lock);

	return rc;
}
EXPORT_SYMBOL_GPL(erst_get_record_id_next);
  510. /* erst_record_id_cache.lock must be held by caller */
  511. static void __erst_record_id_cache_compact(void)
  512. {
  513. int i, wpos = 0;
  514. u64 *entries;
  515. if (erst_record_id_cache.refcount)
  516. return;
  517. entries = erst_record_id_cache.entries;
  518. for (i = 0; i < erst_record_id_cache.len; i++) {
  519. if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
  520. continue;
  521. if (wpos != i)
  522. entries[wpos] = entries[i];
  523. wpos++;
  524. }
  525. erst_record_id_cache.len = wpos;
  526. }
/*
 * erst_get_record_id_end - Finish an iteration started with
 * erst_get_record_id_begin(): drop the cache reference and compact out
 * invalidated IDs once no iteration remains in progress.
 */
void erst_get_record_id_end(void)
{
	/*
	 * erst_disable != 0 should be detected by invoker via the
	 * return value of erst_get_record_id_begin/next, so this
	 * function should not be called for erst_disable != 0.
	 */
	BUG_ON(erst_disable);

	mutex_lock(&erst_record_id_cache.lock);
	erst_record_id_cache.refcount--;
	BUG_ON(erst_record_id_cache.refcount < 0);
	__erst_record_id_cache_compact();
	mutex_unlock(&erst_record_id_cache.lock);
}
EXPORT_SYMBOL_GPL(erst_get_record_id_end);
/*
 * Ask the firmware to move the record staged in the Error Log Address
 * Range at @offset into persistent storage.
 *
 * Runs BEGIN_WRITE, SET_RECORD_OFFSET and EXECUTE_OPERATION, polls the
 * busy status until completion (bounded by FIRMWARE_TIMEOUT), then
 * translates the command status into an errno.
 *
 * Caller must hold erst_lock.
 */
static int __erst_write_to_storage(u64 offset)
{
	struct apei_exec_context ctx;
	u64 timeout = FIRMWARE_TIMEOUT;
	u64 val;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_WRITE);
	if (rc)
		return rc;
	apei_exec_ctx_set_input(&ctx, offset);
	rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
	if (rc)
		return rc;
	rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
	if (rc)
		return rc;
	/* wait for the firmware to finish the write operation */
	for (;;) {
		rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
		if (rc)
			return rc;
		val = apei_exec_ctx_get_output(&ctx);
		if (!val)
			break;
		if (erst_timedout(&timeout, SPIN_UNIT))
			return -EIO;
	}
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
	if (rc)
		return rc;
	val = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
	if (rc)
		return rc;

	return erst_errno(val);
}
/*
 * Ask the firmware to fetch record @record_id from persistent storage
 * into the Error Log Address Range at @offset.
 *
 * Runs BEGIN_READ, SET_RECORD_OFFSET, SET_RECORD_ID and
 * EXECUTE_OPERATION, polls the busy status until completion (bounded by
 * FIRMWARE_TIMEOUT), then translates the command status into an errno.
 *
 * Caller must hold erst_lock.
 */
static int __erst_read_from_storage(u64 record_id, u64 offset)
{
	struct apei_exec_context ctx;
	u64 timeout = FIRMWARE_TIMEOUT;
	u64 val;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_READ);
	if (rc)
		return rc;
	apei_exec_ctx_set_input(&ctx, offset);
	rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
	if (rc)
		return rc;
	apei_exec_ctx_set_input(&ctx, record_id);
	rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
	if (rc)
		return rc;
	rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
	if (rc)
		return rc;
	/* wait for the firmware to finish the read operation */
	for (;;) {
		rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
		if (rc)
			return rc;
		val = apei_exec_ctx_get_output(&ctx);
		if (!val)
			break;
		if (erst_timedout(&timeout, SPIN_UNIT))
			return -EIO;
	}
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
	if (rc)
		return rc;
	val = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
	if (rc)
		return rc;

	return erst_errno(val);
}
/*
 * Ask the firmware to delete record @record_id from persistent storage.
 *
 * Runs BEGIN_CLEAR, SET_RECORD_ID and EXECUTE_OPERATION, polls the busy
 * status until completion (bounded by FIRMWARE_TIMEOUT), then
 * translates the command status into an errno.
 *
 * Caller must hold erst_lock.
 */
static int __erst_clear_from_storage(u64 record_id)
{
	struct apei_exec_context ctx;
	u64 timeout = FIRMWARE_TIMEOUT;
	u64 val;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_CLEAR);
	if (rc)
		return rc;
	apei_exec_ctx_set_input(&ctx, record_id);
	rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
	if (rc)
		return rc;
	rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
	if (rc)
		return rc;
	/* wait for the firmware to finish the clear operation */
	for (;;) {
		rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
		if (rc)
			return rc;
		val = apei_exec_ctx_get_output(&ctx);
		if (!val)
			break;
		if (erst_timedout(&timeout, SPIN_UNIT))
			return -EIO;
	}
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
	if (rc)
		return rc;
	val = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
	if (rc)
		return rc;

	return erst_errno(val);
}
/* NVRAM ERST Error Log Address Range is not supported yet */
static void pr_unimpl_nvram(void)
{
	if (printk_ratelimit())
		pr_warn("NVRAM ERST Log Address Range not implemented yet.\n");
}

/* NVRAM write stub: deliberately silent (see comment below). */
static int __erst_write_to_nvram(const struct cper_record_header *record)
{
	/* do not print message, because printk is not safe for NMI */
	return -ENOSYS;
}

/* NVRAM read stub. */
static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset)
{
	pr_unimpl_nvram();
	return -ENOSYS;
}

/* NVRAM clear stub. */
static int __erst_clear_from_nvram(u64 record_id)
{
	pr_unimpl_nvram();
	return -ENOSYS;
}
/*
 * erst_write - Save an error record to persistent storage.
 * @record: CPER record to save; must carry the CPER record signature
 *	and fit within the Error Log Address Range.
 *
 * Uses trylock on erst_lock and returns -EBUSY when the interpreter is
 * already running, so callers in atomic context do not spin.  Returns
 * 0 on success or a negative errno.
 */
int erst_write(const struct cper_record_header *record)
{
	int rc;
	unsigned long flags;
	struct cper_record_header *rcd_erange;

	if (erst_disable)
		return -ENODEV;

	if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE))
		return -EINVAL;

	if (erst_erange.attr & ERST_RANGE_NVRAM) {
		if (!raw_spin_trylock_irqsave(&erst_lock, flags))
			return -EBUSY;
		rc = __erst_write_to_nvram(record);
		raw_spin_unlock_irqrestore(&erst_lock, flags);
		return rc;
	}

	if (record->record_length > erst_erange.size)
		return -EINVAL;

	if (!raw_spin_trylock_irqsave(&erst_lock, flags))
		return -EBUSY;
	/* stage the record in the error log address range, then commit it */
	memcpy(erst_erange.vaddr, record, record->record_length);
	rcd_erange = erst_erange.vaddr;
	/* signature for serialization system */
	memcpy(&rcd_erange->persistence_information, "ER", 2);
	rc = __erst_write_to_storage(0);
	raw_spin_unlock_irqrestore(&erst_lock, flags);

	return rc;
}
EXPORT_SYMBOL_GPL(erst_write);
/*
 * Make record @record_id available in the Error Log Address Range and
 * report its offset within the range via @offset.
 */
static int __erst_read_to_erange(u64 record_id, u64 *offset)
{
	int rc;

	if (erst_erange.attr & ERST_RANGE_NVRAM)
		return __erst_read_to_erange_from_nvram(
			record_id, offset);
	rc = __erst_read_from_storage(record_id, 0);
	if (rc)
		return rc;
	*offset = 0;

	return 0;
}

/*
 * Read record @record_id into @record.  Always returns the full record
 * length; the data is copied only when it fits into @buflen, so a
 * return value larger than @buflen means the caller's buffer is too
 * small.  Caller must hold erst_lock.
 */
static ssize_t __erst_read(u64 record_id, struct cper_record_header *record,
			   size_t buflen)
{
	int rc;
	u64 offset, len = 0;
	struct cper_record_header *rcd_tmp;

	rc = __erst_read_to_erange(record_id, &offset);
	if (rc)
		return rc;
	rcd_tmp = erst_erange.vaddr + offset;
	len = rcd_tmp->record_length;
	if (len <= buflen)
		memcpy(record, rcd_tmp, len);

	return len;
}
/*
 * erst_read - Read error record @record_id into @record.
 *
 * If return value > buflen, the buffer size is not big enough,
 * else if return value < 0, something goes wrong,
 * else everything is OK, and return value is record length.
 */
ssize_t erst_read(u64 record_id, struct cper_record_header *record,
		  size_t buflen)
{
	ssize_t len;
	unsigned long flags;

	if (erst_disable)
		return -ENODEV;

	raw_spin_lock_irqsave(&erst_lock, flags);
	len = __erst_read(record_id, record, buflen);
	raw_spin_unlock_irqrestore(&erst_lock, flags);
	return len;
}
EXPORT_SYMBOL_GPL(erst_read);
  749. static void erst_clear_cache(u64 record_id)
  750. {
  751. int i;
  752. u64 *entries;
  753. mutex_lock(&erst_record_id_cache.lock);
  754. entries = erst_record_id_cache.entries;
  755. for (i = 0; i < erst_record_id_cache.len; i++) {
  756. if (entries[i] == record_id)
  757. entries[i] = APEI_ERST_INVALID_RECORD_ID;
  758. }
  759. __erst_record_id_cache_compact();
  760. mutex_unlock(&erst_record_id_cache.lock);
  761. }
/*
 * erst_read_record - Read a record and filter it by creator ID.
 * @record_id: ID of the record to read.
 * @record: destination buffer.
 * @buflen: size of @record in bytes.
 * @recordlen: minimum acceptable record length (header size).
 * @creatorid: expected creator GUID, or NULL to accept any record.
 *
 * Returns the record length on success; -ENOENT when the record is
 * gone or has a different creator (callers should move to the next
 * record ID); -EIO for records shorter than @recordlen.  Records that
 * turn out to be invalid are dropped from the record ID cache.
 */
ssize_t erst_read_record(u64 record_id, struct cper_record_header *record,
		size_t buflen, size_t recordlen, const guid_t *creatorid)
{
	ssize_t len;

	/*
	 * if creatorid is NULL, read any record for erst-dbg module
	 */
	if (creatorid == NULL) {
		len = erst_read(record_id, record, buflen);
		if (len == -ENOENT)
			erst_clear_cache(record_id);

		return len;
	}

	len = erst_read(record_id, record, buflen);
	/*
	 * if erst_read return value is -ENOENT skip to next record_id,
	 * and clear the record_id cache.
	 */
	if (len == -ENOENT) {
		erst_clear_cache(record_id);
		goto out;
	}

	if (len < 0)
		goto out;

	/*
	 * if erst_read return value is less than record head length,
	 * consider it as -EIO, and clear the record_id cache.
	 */
	if (len < recordlen) {
		len = -EIO;
		erst_clear_cache(record_id);
		goto out;
	}

	/*
	 * if creatorid is not wanted, consider it as not found,
	 * for skipping to next record_id.
	 */
	if (!guid_equal(&record->creator_id, creatorid))
		len = -ENOENT;

out:
	return len;
}
EXPORT_SYMBOL_GPL(erst_read_record);
/*
 * erst_clear - Delete error record @record_id from persistent storage
 * and invalidate it in the record ID cache.
 *
 * Takes the cache mutex (interruptibly) before the erst_lock spinlock,
 * matching the lock order used elsewhere in this file.  Returns 0 on
 * success or a negative errno.
 */
int erst_clear(u64 record_id)
{
	int rc, i;
	unsigned long flags;
	u64 *entries;

	if (erst_disable)
		return -ENODEV;

	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
	if (rc)
		return rc;
	raw_spin_lock_irqsave(&erst_lock, flags);
	if (erst_erange.attr & ERST_RANGE_NVRAM)
		rc = __erst_clear_from_nvram(record_id);
	else
		rc = __erst_clear_from_storage(record_id);
	raw_spin_unlock_irqrestore(&erst_lock, flags);
	if (rc)
		goto out;
	/* drop the cleared ID from the record ID cache */
	entries = erst_record_id_cache.entries;
	for (i = 0; i < erst_record_id_cache.len; i++) {
		if (entries[i] == record_id)
			entries[i] = APEI_ERST_INVALID_RECORD_ID;
	}
	__erst_record_id_cache_compact();
out:
	mutex_unlock(&erst_record_id_cache.lock);

	return rc;
}
EXPORT_SYMBOL_GPL(erst_clear);
/* Handle the "erst_disable" kernel command line parameter. */
static int __init setup_erst_disable(char *str)
{
	erst_disable = 1;
	return 1;
}
__setup("erst_disable", setup_erst_disable);
  840. static int erst_check_table(struct acpi_table_erst *erst_tab)
  841. {
  842. if ((erst_tab->header_length !=
  843. (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
  844. && (erst_tab->header_length != sizeof(struct acpi_table_erst)))
  845. return -EINVAL;
  846. if (erst_tab->header.length < sizeof(struct acpi_table_erst))
  847. return -EINVAL;
  848. if (erst_tab->entries !=
  849. (erst_tab->header.length - sizeof(struct acpi_table_erst)) /
  850. sizeof(struct acpi_erst_entry))
  851. return -EINVAL;
  852. return 0;
  853. }
static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(struct pstore_record *record);
static int erst_writer(struct pstore_record *record);
static int erst_clearer(struct pstore_record *record);

/* pstore backend backed by ERST; registered for dmesg records only. */
static struct pstore_info erst_info = {
	.owner		= THIS_MODULE,
	.name		= "erst",
	.flags		= PSTORE_FLAGS_DMESG,
	.open		= erst_open_pstore,
	.close		= erst_close_pstore,
	.read		= erst_reader,
	.write		= erst_writer,
	.erase		= erst_clearer
};

/* CPER creator GUID used to tag records written through pstore */
#define CPER_CREATOR_PSTORE						\
	GUID_INIT(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c,	\
		  0x64, 0x90, 0xb8, 0x9d)
/* CPER section type GUID: dmesg dump */
#define CPER_SECTION_TYPE_DMESG						\
	GUID_INIT(0xc197e04e, 0xd545, 0x4a70, 0x9c, 0x17, 0xa5, 0x54,	\
		  0x94, 0x19, 0xeb, 0x12)
/* CPER section type GUID: dmesg dump, compressed variant */
#define CPER_SECTION_TYPE_DMESG_Z					\
	GUID_INIT(0x4f118707, 0x04dd, 0x4055, 0xb5, 0xdd, 0x95, 0x6d,	\
		  0x34, 0xdd, 0xfa, 0xc6)
/* CPER section type GUID: machine check exception */
#define CPER_SECTION_TYPE_MCE						\
	GUID_INIT(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96,	\
		  0x04, 0x4a, 0x38, 0xfc)

/* On-storage layout of a pstore record: CPER header + one section + payload */
struct cper_pstore_record {
	struct cper_record_header hdr;
	struct cper_section_descriptor sec_hdr;
	char data[];
} __packed;

/* Iteration cursor used by erst_reader() between pstore open and close */
static int reader_pos;
  887. static int erst_open_pstore(struct pstore_info *psi)
  888. {
  889. if (erst_disable)
  890. return -ENODEV;
  891. return erst_get_record_id_begin(&reader_pos);
  892. }
  893. static int erst_close_pstore(struct pstore_info *psi)
  894. {
  895. erst_get_record_id_end();
  896. return 0;
  897. }
/*
 * pstore "read": fetch the next pstore-created record from ERST.
 *
 * Walks the record ID cache via reader_pos, skipping records that were
 * cleared concurrently, and copies the payload (everything after the
 * CPER record/section headers) into a freshly allocated record->buf
 * owned by the pstore core.
 *
 * Returns the payload length on success, a negative errno on failure
 * (-EINVAL once the IDs are exhausted).
 */
static ssize_t erst_reader(struct pstore_record *record)
{
	int rc;
	ssize_t len = 0;
	u64 record_id;
	struct cper_pstore_record *rcd;
	/* Scratch buffer sized for the headers plus the pstore payload. */
	size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;

	if (erst_disable)
		return -ENODEV;

	rcd = kmalloc(rcd_len, GFP_KERNEL);
	if (!rcd) {
		rc = -ENOMEM;
		goto out;
	}
skip:
	rc = erst_get_record_id_next(&reader_pos, &record_id);
	if (rc)
		goto out;

	/* no more record */
	if (record_id == APEI_ERST_INVALID_RECORD_ID) {
		rc = -EINVAL;
		goto out;
	}

	/* Only records stamped with CPER_CREATOR_PSTORE are accepted. */
	len = erst_read_record(record_id, &rcd->hdr, rcd_len, sizeof(*rcd),
			&CPER_CREATOR_PSTORE);
	/* The record may be cleared by others, try read next record */
	if (len == -ENOENT)
		goto skip;
	else if (len < 0)
		goto out;

	record->buf = kmalloc(len, GFP_KERNEL);
	if (record->buf == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	/* The payload starts after the record + section headers. */
	memcpy(record->buf, rcd->data, len - sizeof(*rcd));
	record->id = record_id;
	record->compressed = false;
	record->ecc_notice_size = 0;
	/* Map the section type GUID back onto a pstore record type. */
	if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_DMESG_Z)) {
		record->type = PSTORE_TYPE_DMESG;
		record->compressed = true;
	} else if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_DMESG))
		record->type = PSTORE_TYPE_DMESG;
	else if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_MCE))
		record->type = PSTORE_TYPE_MCE;
	else
		record->type = PSTORE_TYPE_MAX;

	/* Use the CPER timestamp only when the header marks it valid. */
	if (rcd->hdr.validation_bits & CPER_VALID_TIMESTAMP)
		record->time.tv_sec = rcd->hdr.timestamp;
	else
		record->time.tv_sec = 0;
	record->time.tv_nsec = 0;

out:
	kfree(rcd);
	return (rc < 0) ? rc : (len - sizeof(*rcd));
}
/*
 * pstore "write": wrap the payload already staged in erst_info.buf in
 * CPER record and section headers, then store it through erst_write().
 *
 * erst_info.buf points sizeof(*rcd) bytes past the start of the backing
 * buffer (see erst_init()), so the headers are built in place directly
 * in front of the payload.  On success record->id is set to the new
 * CPER record ID so the record can later be erased.
 */
static int erst_writer(struct pstore_record *record)
{
	struct cper_pstore_record *rcd = (struct cper_pstore_record *)
			(erst_info.buf - sizeof(*rcd));
	int ret;

	memset(rcd, 0, sizeof(*rcd));
	memcpy(rcd->hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
	rcd->hdr.revision = CPER_RECORD_REV;
	rcd->hdr.signature_end = CPER_SIG_END;
	rcd->hdr.section_count = 1;
	rcd->hdr.error_severity = CPER_SEV_FATAL;
	/* timestamp valid. platform_id, partition_id are invalid */
	rcd->hdr.validation_bits = CPER_VALID_TIMESTAMP;
	rcd->hdr.timestamp = ktime_get_real_seconds();
	rcd->hdr.record_length = sizeof(*rcd) + record->size;
	rcd->hdr.creator_id = CPER_CREATOR_PSTORE;
	rcd->hdr.notification_type = CPER_NOTIFY_MCE;
	rcd->hdr.record_id = cper_next_record_id();
	rcd->hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;

	rcd->sec_hdr.section_offset = sizeof(*rcd);
	rcd->sec_hdr.section_length = record->size;
	rcd->sec_hdr.revision = CPER_SEC_REV;
	/* fru_id and fru_text is invalid */
	rcd->sec_hdr.validation_bits = 0;
	rcd->sec_hdr.flags = CPER_SEC_PRIMARY;
	/* Choose the section type GUID that erst_reader() will map back. */
	switch (record->type) {
	case PSTORE_TYPE_DMESG:
		if (record->compressed)
			rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG_Z;
		else
			rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG;
		break;
	case PSTORE_TYPE_MCE:
		rcd->sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
		break;
	default:
		return -EINVAL;
	}
	rcd->sec_hdr.section_severity = CPER_SEV_FATAL;

	ret = erst_write(&rcd->hdr);
	record->id = rcd->hdr.record_id;

	return ret;
}
  998. static int erst_clearer(struct pstore_record *record)
  999. {
  1000. return erst_clear(record->id);
  1001. }
/*
 * Driver init: locate and validate the ACPI ERST table, collect and
 * claim the resources its serialization instructions need, map the
 * Error Log Address Range, and (best effort) register the range with
 * pstore.
 *
 * Every failure path sets erst_disable so the rest of this file
 * degrades to "not available".  acpi_disabled and a missing table
 * (AE_NOT_FOUND) are not treated as errors: rc stays 0.
 */
static int __init erst_init(void)
{
	int rc = 0;
	acpi_status status;
	struct apei_exec_context ctx;
	struct apei_resources erst_resources;
	struct resource *r;
	char *buf;

	if (acpi_disabled)
		goto err;

	if (erst_disable) {
		pr_info(
	"Error Record Serialization Table (ERST) support is disabled.\n");
		goto err;
	}

	status = acpi_get_table(ACPI_SIG_ERST, 0,
				(struct acpi_table_header **)&erst_tab);
	if (status == AE_NOT_FOUND)
		goto err;
	else if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);
		pr_err("Failed to get table, %s\n", msg);
		rc = -EINVAL;
		goto err;
	}

	rc = erst_check_table(erst_tab);
	if (rc) {
		pr_err(FW_BUG "ERST table is invalid.\n");
		goto err_put_erst_tab;
	}

	/* Collect and request every resource the ERST instructions touch. */
	apei_resources_init(&erst_resources);
	erst_exec_ctx_init(&ctx);
	rc = apei_exec_collect_resources(&ctx, &erst_resources);
	if (rc)
		goto err_fini;
	rc = apei_resources_request(&erst_resources, "APEI ERST");
	if (rc)
		goto err_fini;
	rc = apei_exec_pre_map_gars(&ctx);
	if (rc)
		goto err_release;
	rc = erst_get_erange(&erst_erange);
	if (rc) {
		if (rc == -ENODEV)
			pr_info(
	"The corresponding hardware device or firmware implementation "
	"is not available.\n");
		else
			pr_err("Failed to get Error Log Address Range.\n");
		goto err_unmap_reg;
	}

	r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
	if (!r) {
		pr_err("Can not request [mem %#010llx-%#010llx] for ERST.\n",
		       (unsigned long long)erst_erange.base,
		       (unsigned long long)erst_erange.base +
		       erst_erange.size - 1);
		rc = -EIO;
		goto err_unmap_reg;
	}
	rc = -ENOMEM;
	erst_erange.vaddr = ioremap_cache(erst_erange.base,
					  erst_erange.size);
	if (!erst_erange.vaddr)
		goto err_release_erange;

	pr_info(
	"Error Record Serialization Table (ERST) support is initialized.\n");

	/* pstore registration is best effort: failure still returns 0. */
	buf = kmalloc(erst_erange.size, GFP_KERNEL);
	if (buf) {
		/* Leave room in front of the buffer for the CPER headers
		 * that erst_writer() builds in place. */
		erst_info.buf = buf + sizeof(struct cper_pstore_record);
		erst_info.bufsize = erst_erange.size -
				    sizeof(struct cper_pstore_record);
		rc = pstore_register(&erst_info);
		if (rc) {
			if (rc != -EPERM)
				pr_info(
				"Could not register with persistent store.\n");
			erst_info.buf = NULL;
			erst_info.bufsize = 0;
			kfree(buf);
		}
	} else
		pr_err(
		"Failed to allocate %lld bytes for persistent store error log.\n",
		erst_erange.size);

	/* Cleanup ERST Resources */
	apei_resources_fini(&erst_resources);

	return 0;

	/* Unwind in reverse acquisition order; labels fall through. */
err_release_erange:
	release_mem_region(erst_erange.base, erst_erange.size);
err_unmap_reg:
	apei_exec_post_unmap_gars(&ctx);
err_release:
	apei_resources_release(&erst_resources);
err_fini:
	apei_resources_fini(&erst_resources);
err_put_erst_tab:
	acpi_put_table((struct acpi_table_header *)erst_tab);
err:
	erst_disable = 1;
	return rc;
}
device_initcall(erst_init);