  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * MTD Oops/Panic logger
  4. *
  5. * Copyright © 2007 Nokia Corporation. All rights reserved.
  6. *
  7. * Author: Richard Purdie <[email protected]>
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/module.h>
  11. #include <linux/console.h>
  12. #include <linux/vmalloc.h>
  13. #include <linux/workqueue.h>
  14. #include <linux/sched.h>
  15. #include <linux/wait.h>
  16. #include <linux/delay.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/timekeeping.h>
  19. #include <linux/mtd/mtd.h>
  20. #include <linux/kmsg_dump.h>
/* Maximum MTD partition size */
#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)

/* Size of one on-flash record; mtdoops_init() enforces a multiple of 4096. */
static unsigned long record_size = 4096;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
		"record size for MTD OOPS pages in bytes (default 4096)");

/* Target MTD device, given either by name or by index (parsed at init). */
static char mtddev[80];
module_param_string(mtddev, mtddev, 80, 0400);
MODULE_PARM_DESC(mtddev,
		"name or index number of the MTD device to use");

/* Non-zero: log oopses as well as panics; zero: log panics only. */
static int dump_oops = 1;
module_param(dump_oops, int, 0600);
MODULE_PARM_DESC(dump_oops,
		"set to 1 to dump oopses, 0 to only dump panics (default 1)");
#define MTDOOPS_KERNMSG_MAGIC_v1 0x5d005d00 /* Original */
#define MTDOOPS_KERNMSG_MAGIC_v2 0x5d005e00 /* Adds the timestamp */

/* Header stored at the start of every record written to flash. */
struct mtdoops_hdr {
	u32 seq;		/* record sequence number (0xffffffff = erased) */
	u32 magic;		/* MTDOOPS_KERNMSG_MAGIC_v1 or _v2 */
	ktime_t timestamp;	/* wall-clock time of the dump (v2 records only) */
} __packed;
/* Single driver instance; bound to at most one MTD device at a time. */
static struct mtdoops_context {
	struct kmsg_dumper dump;	/* registered via kmsg_dump_register() */
	int mtd_index;			/* index of device to attach, -1 = none */
	struct work_struct work_erase;	/* erases the next eraseblock */
	struct work_struct work_write;	/* writes a record outside panic context */
	struct mtd_info *mtd;		/* attached device, NULL when detached */
	int oops_pages;			/* total record slots on the device */
	int nextpage;			/* slot the next record goes to */
	int nextcount;			/* sequence number of the next record */
	unsigned long *oops_page_used;	/* bitmap, one bit per record slot */
	unsigned long oops_buf_busy;	/* bit 0 guards oops_buf */
	void *oops_buf;			/* record_size-byte staging buffer */
} oops_cxt;
/* Record that the given record slot currently holds data. */
static void mark_page_used(struct mtdoops_context *cxt, int page)
{
	set_bit(page, cxt->oops_page_used);
}
/* Record that the given record slot is erased/free. */
static void mark_page_unused(struct mtdoops_context *cxt, int page)
{
	clear_bit(page, cxt->oops_page_used);
}
/* Return non-zero when the given record slot holds data. */
static int page_is_used(struct mtdoops_context *cxt, int page)
{
	return test_bit(page, cxt->oops_page_used);
}
  67. static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
  68. {
  69. struct mtd_info *mtd = cxt->mtd;
  70. u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
  71. u32 start_page = start_page_offset / record_size;
  72. u32 erase_pages = mtd->erasesize / record_size;
  73. struct erase_info erase;
  74. int ret;
  75. int page;
  76. erase.addr = offset;
  77. erase.len = mtd->erasesize;
  78. ret = mtd_erase(mtd, &erase);
  79. if (ret) {
  80. printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
  81. (unsigned long long)erase.addr,
  82. (unsigned long long)erase.len, mtddev);
  83. return ret;
  84. }
  85. /* Mark pages as unused */
  86. for (page = start_page; page < start_page + erase_pages; page++)
  87. mark_page_unused(cxt, page);
  88. return 0;
  89. }
  90. static void mtdoops_inc_counter(struct mtdoops_context *cxt)
  91. {
  92. cxt->nextpage++;
  93. if (cxt->nextpage >= cxt->oops_pages)
  94. cxt->nextpage = 0;
  95. cxt->nextcount++;
  96. if (cxt->nextcount == 0xffffffff)
  97. cxt->nextcount = 0;
  98. if (page_is_used(cxt, cxt->nextpage)) {
  99. schedule_work(&cxt->work_erase);
  100. return;
  101. }
  102. printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
  103. cxt->nextpage, cxt->nextcount);
  104. }
/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_erase);
	struct mtd_info *mtd = cxt->mtd;
	int i = 0, j, ret, mod;

	/* We were unregistered */
	if (!mtd)
		return;

	/* Round nextpage up to the first record of the next eraseblock. */
	mod = (cxt->nextpage * record_size) % mtd->erasesize;
	if (mod != 0) {
		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
	}

	/*
	 * Skip over bad eraseblocks; i counts the blocks tried so we can
	 * give up once every block on the device has been rejected.
	 * NOTE: badblock is also entered from below when an erase fails.
	 */
	while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
badblock:
		printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
		       cxt->nextpage * record_size);
		i++;
		cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
		if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
			printk(KERN_ERR "mtdoops: all blocks bad!\n");
			return;
		}
	}

	if (ret < 0) {
		printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n");
		return;
	}

	/* Retry the erase up to three times before declaring the block bad. */
	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
		ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);

	if (ret >= 0) {
		printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
		       cxt->nextpage, cxt->nextcount);
		return;
	}

	/* Erase kept failing: try to mark the block bad, then move past it. */
	if (ret == -EIO) {
		ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
		if (ret < 0 && ret != -EOPNOTSUPP) {
			printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
			return;
		}
	}
	goto badblock;
}
/*
 * Write one record (header + previously captured kmsg text) at the current
 * flash position.  @panic selects mtd_panic_write() so the write can be
 * issued from the panic path; otherwise mtd_write() is used.
 */
static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen;
	struct mtdoops_hdr *hdr;
	int ret;

	/* oops_buf is shared with mtdoops_do_dump(); bail out if it is busy. */
	if (test_and_set_bit(0, &cxt->oops_buf_busy))
		return;

	/* Add mtdoops header to the buffer */
	hdr = (struct mtdoops_hdr *)cxt->oops_buf;
	hdr->seq = cxt->nextcount;
	hdr->magic = MTDOOPS_KERNMSG_MAGIC_v2;
	hdr->timestamp = ktime_get_real();

	if (panic) {
		ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
				      record_size, &retlen, cxt->oops_buf);
		if (ret == -EOPNOTSUPP) {
			/* No panic_write: skip slot bookkeeping entirely. */
			printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
			goto out;
		}
	} else
		ret = mtd_write(mtd, cxt->nextpage * record_size,
				record_size, &retlen, cxt->oops_buf);

	if (retlen != record_size || ret < 0)
		printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
		       cxt->nextpage * record_size, retlen, record_size, ret);
	mark_page_used(cxt, cxt->nextpage);
	/* Refill with 0xff so unused space in the next record reads as erased. */
	memset(cxt->oops_buf, 0xff, record_size);

	mtdoops_inc_counter(cxt);
out:
	clear_bit(0, &cxt->oops_buf_busy);
}
/* Scheduled work - writes the pending record from process context. */
static void mtdoops_workfunc_write(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_write);

	mtdoops_write(cxt, 0);
}
/*
 * Scan every record slot on the device, rebuild the used-slot bitmap, and
 * resume writing after the record with the highest sequence number.  The
 * comparisons below handle 32-bit wraparound of the sequence counter.
 */
static void find_next_position(struct mtdoops_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	struct mtdoops_hdr hdr;
	int ret, page, maxpos = 0;
	u32 maxcount = 0xffffffff;	/* 0xffffffff = no record found yet */
	size_t retlen;

	for (page = 0; page < cxt->oops_pages; page++) {
		if (mtd_block_isbad(mtd, page * record_size))
			continue;
		/* Assume the page is used */
		mark_page_used(cxt, page);
		ret = mtd_read(mtd, page * record_size, sizeof(hdr),
			       &retlen, (u_char *)&hdr);
		if (retlen != sizeof(hdr) ||
				(ret < 0 && !mtd_is_bitflip(ret))) {
			printk(KERN_ERR "mtdoops: read failure at %ld (%zu of %zu read), err %d\n",
			       page * record_size, retlen, sizeof(hdr), ret);
			continue;
		}

		/* All-0xff header means the slot is still erased. */
		if (hdr.seq == 0xffffffff && hdr.magic == 0xffffffff)
			mark_page_unused(cxt, page);
		/* Skip erased slots and records with an unrecognised magic. */
		if (hdr.seq == 0xffffffff ||
		    (hdr.magic != MTDOOPS_KERNMSG_MAGIC_v1 &&
		     hdr.magic != MTDOOPS_KERNMSG_MAGIC_v2))
			continue;
		if (maxcount == 0xffffffff) {
			/* First valid record seen. */
			maxcount = hdr.seq;
			maxpos = page;
		} else if (hdr.seq < 0x40000000 && maxcount > 0xc0000000) {
			/* Counter wrapped: a small seq beats a near-max one. */
			maxcount = hdr.seq;
			maxpos = page;
		} else if (hdr.seq > maxcount && hdr.seq < 0xc0000000) {
			maxcount = hdr.seq;
			maxpos = page;
		} else if (hdr.seq > maxcount && hdr.seq > 0xc0000000
					&& maxcount > 0x80000000) {
			maxcount = hdr.seq;
			maxpos = page;
		}
	}
	if (maxcount == 0xffffffff) {
		/* Empty device: inc_counter below wraps to page 0, count 1. */
		cxt->nextpage = cxt->oops_pages - 1;
		cxt->nextcount = 0;
	}
	else {
		cxt->nextpage = maxpos;
		cxt->nextcount = maxcount;
	}

	mtdoops_inc_counter(cxt);
}
  243. static void mtdoops_do_dump(struct kmsg_dumper *dumper,
  244. enum kmsg_dump_reason reason)
  245. {
  246. struct mtdoops_context *cxt = container_of(dumper,
  247. struct mtdoops_context, dump);
  248. struct kmsg_dump_iter iter;
  249. /* Only dump oopses if dump_oops is set */
  250. if (reason == KMSG_DUMP_OOPS && !dump_oops)
  251. return;
  252. kmsg_dump_rewind(&iter);
  253. if (test_and_set_bit(0, &cxt->oops_buf_busy))
  254. return;
  255. kmsg_dump_get_buffer(&iter, true,
  256. cxt->oops_buf + sizeof(struct mtdoops_hdr),
  257. record_size - sizeof(struct mtdoops_hdr), NULL);
  258. clear_bit(0, &cxt->oops_buf_busy);
  259. if (reason != KMSG_DUMP_OOPS) {
  260. /* Panics must be written immediately */
  261. mtdoops_write(cxt, 1);
  262. } else {
  263. /* For other cases, schedule work to write it "nicely" */
  264. schedule_work(&cxt->work_write);
  265. }
  266. }
/*
 * MTD notifier: a device appeared.  If it is the configured one and passes
 * the size/eraseblock sanity checks, allocate state, register the kmsg
 * dumper and locate the resume position.
 */
static void mtdoops_notify_add(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;
	u64 mtdoops_pages = div_u64(mtd->size, record_size);
	int err;

	/* mtddev may be a device name; resolve it to an index. */
	if (!strcmp(mtd->name, mtddev))
		cxt->mtd_index = mtd->index;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	/* Device must hold at least two eraseblocks. */
	if (mtd->size < mtd->erasesize * 2) {
		printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
		       mtd->index);
		return;
	}
	/* A record must fit inside a single eraseblock. */
	if (mtd->erasesize < record_size) {
		printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
		       mtd->index);
		return;
	}
	if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
		printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
		       mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
		return;
	}

	/* oops_page_used is a bit field */
	cxt->oops_page_used =
		vmalloc(array_size(sizeof(unsigned long),
				   DIV_ROUND_UP(mtdoops_pages,
						BITS_PER_LONG)));
	if (!cxt->oops_page_used) {
		printk(KERN_ERR "mtdoops: could not allocate page array\n");
		return;
	}

	cxt->dump.max_reason = KMSG_DUMP_OOPS;
	cxt->dump.dump = mtdoops_do_dump;
	err = kmsg_dump_register(&cxt->dump);
	if (err) {
		printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
		vfree(cxt->oops_page_used);
		cxt->oops_page_used = NULL;
		return;
	}

	/* Attach: from here on mtdoops_do_dump() may fire. */
	cxt->mtd = mtd;
	cxt->oops_pages = (int)mtd->size / record_size;
	find_next_position(cxt);
	printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}
  314. static void mtdoops_notify_remove(struct mtd_info *mtd)
  315. {
  316. struct mtdoops_context *cxt = &oops_cxt;
  317. if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
  318. return;
  319. if (kmsg_dump_unregister(&cxt->dump) < 0)
  320. printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
  321. cxt->mtd = NULL;
  322. flush_work(&cxt->work_erase);
  323. flush_work(&cxt->work_write);
  324. }
/* Callbacks invoked by the MTD core when devices are added or removed. */
static struct mtd_notifier mtdoops_notifier = {
	.add = mtdoops_notify_add,
	.remove = mtdoops_notify_remove,
};
  329. static int __init mtdoops_init(void)
  330. {
  331. struct mtdoops_context *cxt = &oops_cxt;
  332. int mtd_index;
  333. char *endp;
  334. if (strlen(mtddev) == 0) {
  335. printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
  336. return -EINVAL;
  337. }
  338. if ((record_size & 4095) != 0) {
  339. printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
  340. return -EINVAL;
  341. }
  342. if (record_size < 4096) {
  343. printk(KERN_ERR "mtdoops: record_size must be over 4096 bytes\n");
  344. return -EINVAL;
  345. }
  346. /* Setup the MTD device to use */
  347. cxt->mtd_index = -1;
  348. mtd_index = simple_strtoul(mtddev, &endp, 0);
  349. if (*endp == '\0')
  350. cxt->mtd_index = mtd_index;
  351. cxt->oops_buf = vmalloc(record_size);
  352. if (!cxt->oops_buf)
  353. return -ENOMEM;
  354. memset(cxt->oops_buf, 0xff, record_size);
  355. cxt->oops_buf_busy = 0;
  356. INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
  357. INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
  358. register_mtd_user(&mtdoops_notifier);
  359. return 0;
  360. }
/* Module exit: stop notifications, then free init/attach allocations. */
static void __exit mtdoops_exit(void)
{
	struct mtdoops_context *cxt = &oops_cxt;

	unregister_mtd_user(&mtdoops_notifier);
	vfree(cxt->oops_buf);
	vfree(cxt->oops_page_used);
}
/* Module registration and metadata. */
module_init(mtdoops_init);
module_exit(mtdoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <[email protected]>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");