/* mtdpstore.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #define dev_fmt(fmt) "mtdoops-pstore: " fmt
  3. #include <linux/kernel.h>
  4. #include <linux/module.h>
  5. #include <linux/pstore_blk.h>
  6. #include <linux/mtd/mtd.h>
  7. #include <linux/bitops.h>
  8. #include <linux/slab.h>
/*
 * Driver-wide state for the mtdoops-pstore backend.  A single static
 * instance (oops_cxt) backs all pstore/blk callbacks, so at most one
 * MTD partition can be attached at a time.
 */
static struct mtdpstore_context {
	int index;			/* MTD device number to attach to; -1 when unset */
	struct pstore_blk_config info;	/* configuration fetched from pstore/blk */
	struct pstore_device_info dev;	/* descriptor registered with pstore/blk */
	struct mtd_info *mtd;		/* attached MTD device, NULL when detached */
	unsigned long *rmmap; /* removed bit map */
	unsigned long *usedmap; /* used bit map */
	/*
	 * used for panic write
	 * As there are no block_isbad for panic case, we should keep this
	 * status before panic to ensure panic_write not failed.
	 */
	unsigned long *badmap; /* bad block bit map */
} oops_cxt;
  23. static int mtdpstore_block_isbad(struct mtdpstore_context *cxt, loff_t off)
  24. {
  25. int ret;
  26. struct mtd_info *mtd = cxt->mtd;
  27. u64 blknum;
  28. off = ALIGN_DOWN(off, mtd->erasesize);
  29. blknum = div_u64(off, mtd->erasesize);
  30. if (test_bit(blknum, cxt->badmap))
  31. return true;
  32. ret = mtd_block_isbad(mtd, off);
  33. if (ret < 0) {
  34. dev_err(&mtd->dev, "mtd_block_isbad failed, aborting\n");
  35. return ret;
  36. } else if (ret > 0) {
  37. set_bit(blknum, cxt->badmap);
  38. return true;
  39. }
  40. return false;
  41. }
  42. static inline int mtdpstore_panic_block_isbad(struct mtdpstore_context *cxt,
  43. loff_t off)
  44. {
  45. struct mtd_info *mtd = cxt->mtd;
  46. u64 blknum;
  47. off = ALIGN_DOWN(off, mtd->erasesize);
  48. blknum = div_u64(off, mtd->erasesize);
  49. return test_bit(blknum, cxt->badmap);
  50. }
  51. static inline void mtdpstore_mark_used(struct mtdpstore_context *cxt,
  52. loff_t off)
  53. {
  54. struct mtd_info *mtd = cxt->mtd;
  55. u64 zonenum = div_u64(off, cxt->info.kmsg_size);
  56. dev_dbg(&mtd->dev, "mark zone %llu used\n", zonenum);
  57. set_bit(zonenum, cxt->usedmap);
  58. }
  59. static inline void mtdpstore_mark_unused(struct mtdpstore_context *cxt,
  60. loff_t off)
  61. {
  62. struct mtd_info *mtd = cxt->mtd;
  63. u64 zonenum = div_u64(off, cxt->info.kmsg_size);
  64. dev_dbg(&mtd->dev, "mark zone %llu unused\n", zonenum);
  65. clear_bit(zonenum, cxt->usedmap);
  66. }
  67. static inline void mtdpstore_block_mark_unused(struct mtdpstore_context *cxt,
  68. loff_t off)
  69. {
  70. struct mtd_info *mtd = cxt->mtd;
  71. u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
  72. u64 zonenum;
  73. off = ALIGN_DOWN(off, mtd->erasesize);
  74. zonenum = div_u64(off, cxt->info.kmsg_size);
  75. while (zonecnt > 0) {
  76. dev_dbg(&mtd->dev, "mark zone %llu unused\n", zonenum);
  77. clear_bit(zonenum, cxt->usedmap);
  78. zonenum++;
  79. zonecnt--;
  80. }
  81. }
  82. static inline int mtdpstore_is_used(struct mtdpstore_context *cxt, loff_t off)
  83. {
  84. u64 zonenum = div_u64(off, cxt->info.kmsg_size);
  85. u64 blknum = div_u64(off, cxt->mtd->erasesize);
  86. if (test_bit(blknum, cxt->badmap))
  87. return true;
  88. return test_bit(zonenum, cxt->usedmap);
  89. }
  90. static int mtdpstore_block_is_used(struct mtdpstore_context *cxt,
  91. loff_t off)
  92. {
  93. struct mtd_info *mtd = cxt->mtd;
  94. u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
  95. u64 zonenum;
  96. off = ALIGN_DOWN(off, mtd->erasesize);
  97. zonenum = div_u64(off, cxt->info.kmsg_size);
  98. while (zonecnt > 0) {
  99. if (test_bit(zonenum, cxt->usedmap))
  100. return true;
  101. zonenum++;
  102. zonecnt--;
  103. }
  104. return false;
  105. }
  106. static int mtdpstore_is_empty(struct mtdpstore_context *cxt, char *buf,
  107. size_t size)
  108. {
  109. struct mtd_info *mtd = cxt->mtd;
  110. size_t sz;
  111. int i;
  112. sz = min_t(uint32_t, size, mtd->writesize / 4);
  113. for (i = 0; i < sz; i++) {
  114. if (buf[i] != (char)0xFF)
  115. return false;
  116. }
  117. return true;
  118. }
  119. static void mtdpstore_mark_removed(struct mtdpstore_context *cxt, loff_t off)
  120. {
  121. struct mtd_info *mtd = cxt->mtd;
  122. u64 zonenum = div_u64(off, cxt->info.kmsg_size);
  123. dev_dbg(&mtd->dev, "mark zone %llu removed\n", zonenum);
  124. set_bit(zonenum, cxt->rmmap);
  125. }
  126. static void mtdpstore_block_clear_removed(struct mtdpstore_context *cxt,
  127. loff_t off)
  128. {
  129. struct mtd_info *mtd = cxt->mtd;
  130. u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
  131. u64 zonenum;
  132. off = ALIGN_DOWN(off, mtd->erasesize);
  133. zonenum = div_u64(off, cxt->info.kmsg_size);
  134. while (zonecnt > 0) {
  135. clear_bit(zonenum, cxt->rmmap);
  136. zonenum++;
  137. zonecnt--;
  138. }
  139. }
  140. static int mtdpstore_block_is_removed(struct mtdpstore_context *cxt,
  141. loff_t off)
  142. {
  143. struct mtd_info *mtd = cxt->mtd;
  144. u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
  145. u64 zonenum;
  146. off = ALIGN_DOWN(off, mtd->erasesize);
  147. zonenum = div_u64(off, cxt->info.kmsg_size);
  148. while (zonecnt > 0) {
  149. if (test_bit(zonenum, cxt->rmmap))
  150. return true;
  151. zonenum++;
  152. zonecnt--;
  153. }
  154. return false;
  155. }
  156. static int mtdpstore_erase_do(struct mtdpstore_context *cxt, loff_t off)
  157. {
  158. struct mtd_info *mtd = cxt->mtd;
  159. struct erase_info erase;
  160. int ret;
  161. off = ALIGN_DOWN(off, cxt->mtd->erasesize);
  162. dev_dbg(&mtd->dev, "try to erase off 0x%llx\n", off);
  163. erase.len = cxt->mtd->erasesize;
  164. erase.addr = off;
  165. ret = mtd_erase(cxt->mtd, &erase);
  166. if (!ret)
  167. mtdpstore_block_clear_removed(cxt, off);
  168. else
  169. dev_err(&mtd->dev, "erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
  170. (unsigned long long)erase.addr,
  171. (unsigned long long)erase.len, cxt->info.device);
  172. return ret;
  173. }
  174. /*
  175. * called while removing file
  176. *
  177. * Avoiding over erasing, do erase block only when the whole block is unused.
  178. * If the block contains valid log, do erase lazily on flush_removed() when
  179. * unregister.
  180. */
  181. static ssize_t mtdpstore_erase(size_t size, loff_t off)
  182. {
  183. struct mtdpstore_context *cxt = &oops_cxt;
  184. if (mtdpstore_block_isbad(cxt, off))
  185. return -EIO;
  186. mtdpstore_mark_unused(cxt, off);
  187. /* If the block still has valid data, mtdpstore do erase lazily */
  188. if (likely(mtdpstore_block_is_used(cxt, off))) {
  189. mtdpstore_mark_removed(cxt, off);
  190. return 0;
  191. }
  192. /* all zones are unused, erase it */
  193. return mtdpstore_erase_do(cxt, off);
  194. }
/*
 * What is security for mtdpstore?
 * As there is no erase for panic case, we should ensure at least one zone
 * is writable. Otherwise, panic write will fail.
 * If zone is used, write operation will return -ENOMSG, which means that
 * pstore/blk will try one by one until gets an empty zone. So, it is not
 * needed to ensure the next zone is empty, but at least one.
 */
static int mtdpstore_security(struct mtdpstore_context *cxt, loff_t off)
{
	int ret = 0, i;
	struct mtd_info *mtd = cxt->mtd;
	u32 zonenum = (u32)div_u64(off, cxt->info.kmsg_size);
	u32 zonecnt = (u32)div_u64(cxt->mtd->size, cxt->info.kmsg_size);
	u32 blkcnt = (u32)div_u64(cxt->mtd->size, cxt->mtd->erasesize);
	u32 erasesize = cxt->mtd->erasesize;

	/* Scan all zones starting at the one just written, wrapping around */
	for (i = 0; i < zonecnt; i++) {
		u32 num = (zonenum + i) % zonecnt;

		/* found empty zone */
		if (!test_bit(num, cxt->usedmap))
			return 0;
	}

	/* If there is no any empty zone, we have no way but to do erase */
	while (blkcnt--) {
		/* Advance one eraseblock, wrapping at the end of the device */
		div64_u64_rem(off + erasesize, cxt->mtd->size, (u64 *)&off);
		if (mtdpstore_block_isbad(cxt, off))
			continue;
		ret = mtdpstore_erase_do(cxt, off);
		if (!ret) {
			mtdpstore_block_mark_unused(cxt, off);
			break;
		}
	}
	/*
	 * NOTE(review): if every block is bad, the loop only ever hits
	 * 'continue', so ret stays 0 and this message is never printed —
	 * confirm whether that is intended.
	 */
	if (ret)
		dev_err(&mtd->dev, "all blocks bad!\n");
	dev_dbg(&mtd->dev, "end security\n");
	return ret;
}
  233. static ssize_t mtdpstore_write(const char *buf, size_t size, loff_t off)
  234. {
  235. struct mtdpstore_context *cxt = &oops_cxt;
  236. struct mtd_info *mtd = cxt->mtd;
  237. size_t retlen;
  238. int ret;
  239. if (mtdpstore_block_isbad(cxt, off))
  240. return -ENOMSG;
  241. /* zone is used, please try next one */
  242. if (mtdpstore_is_used(cxt, off))
  243. return -ENOMSG;
  244. dev_dbg(&mtd->dev, "try to write off 0x%llx size %zu\n", off, size);
  245. ret = mtd_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
  246. if (ret < 0 || retlen != size) {
  247. dev_err(&mtd->dev, "write failure at %lld (%zu of %zu written), err %d\n",
  248. off, retlen, size, ret);
  249. return -EIO;
  250. }
  251. mtdpstore_mark_used(cxt, off);
  252. mtdpstore_security(cxt, off);
  253. return retlen;
  254. }
  255. static inline bool mtdpstore_is_io_error(int ret)
  256. {
  257. return ret < 0 && !mtd_is_bitflip(ret) && !mtd_is_eccerr(ret);
  258. }
/*
 * All zones will be read as pstore/blk will read zone one by one when do
 * recover.
 */
static ssize_t mtdpstore_read(char *buf, size_t size, loff_t off)
{
	struct mtdpstore_context *cxt = &oops_cxt;
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen, done;
	int ret;

	if (mtdpstore_block_isbad(cxt, off))
		return -ENOMSG;

	dev_dbg(&mtd->dev, "try to read off 0x%llx size %zu\n", off, size);
	/* Loop: mtd_read() may return fewer bytes than asked for */
	for (done = 0, retlen = 0; done < size; done += retlen) {
		retlen = 0;

		ret = mtd_read(cxt->mtd, off + done, size - done, &retlen,
				(u_char *)buf + done);
		if (mtdpstore_is_io_error(ret)) {
			dev_err(&mtd->dev, "read failure at %lld (%zu of %zu read), err %d\n",
					off + done, retlen, size - done, ret);
			/* the zone may be broken, try next one */
			return -ENOMSG;
		}

		/*
		 * ECC error. The impact on log data is so small. Maybe we can
		 * still read it and try to understand. So mtdpstore just hands
		 * over what it gets and user can judge whether the data is
		 * valid or not.
		 */
		if (mtd_is_eccerr(ret)) {
			dev_err(&mtd->dev, "ecc error at %lld (%zu of %zu read), err %d\n",
					off + done, retlen, size - done, ret);
			/* driver may not set retlen when ecc error */
			retlen = retlen == 0 ? size - done : retlen;
		}
	}

	/* Resynchronize the used bitmap with what is actually on flash */
	if (mtdpstore_is_empty(cxt, buf, size))
		mtdpstore_mark_unused(cxt, off);
	else
		mtdpstore_mark_used(cxt, off);

	mtdpstore_security(cxt, off);
	/*
	 * NOTE(review): this returns the last chunk's retlen rather than the
	 * total 'done'; correct only when the loop completes in one pass —
	 * confirm against pstore/blk's expectations.
	 */
	return retlen;
}
  302. static ssize_t mtdpstore_panic_write(const char *buf, size_t size, loff_t off)
  303. {
  304. struct mtdpstore_context *cxt = &oops_cxt;
  305. struct mtd_info *mtd = cxt->mtd;
  306. size_t retlen;
  307. int ret;
  308. if (mtdpstore_panic_block_isbad(cxt, off))
  309. return -ENOMSG;
  310. /* zone is used, please try next one */
  311. if (mtdpstore_is_used(cxt, off))
  312. return -ENOMSG;
  313. ret = mtd_panic_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
  314. if (ret < 0 || size != retlen) {
  315. dev_err(&mtd->dev, "panic write failure at %lld (%zu of %zu read), err %d\n",
  316. off, retlen, size, ret);
  317. return -EIO;
  318. }
  319. mtdpstore_mark_used(cxt, off);
  320. return retlen;
  321. }
  322. static void mtdpstore_notify_add(struct mtd_info *mtd)
  323. {
  324. int ret;
  325. struct mtdpstore_context *cxt = &oops_cxt;
  326. struct pstore_blk_config *info = &cxt->info;
  327. unsigned long longcnt;
  328. if (!strcmp(mtd->name, info->device))
  329. cxt->index = mtd->index;
  330. if (mtd->index != cxt->index || cxt->index < 0)
  331. return;
  332. dev_dbg(&mtd->dev, "found matching MTD device %s\n", mtd->name);
  333. if (mtd->size < info->kmsg_size * 2) {
  334. dev_err(&mtd->dev, "MTD partition %d not big enough\n",
  335. mtd->index);
  336. return;
  337. }
  338. /*
  339. * kmsg_size must be aligned to 4096 Bytes, which is limited by
  340. * psblk. The default value of kmsg_size is 64KB. If kmsg_size
  341. * is larger than erasesize, some errors will occur since mtdpstore
  342. * is designed on it.
  343. */
  344. if (mtd->erasesize < info->kmsg_size) {
  345. dev_err(&mtd->dev, "eraseblock size of MTD partition %d too small\n",
  346. mtd->index);
  347. return;
  348. }
  349. if (unlikely(info->kmsg_size % mtd->writesize)) {
  350. dev_err(&mtd->dev, "record size %lu KB must align to write size %d KB\n",
  351. info->kmsg_size / 1024,
  352. mtd->writesize / 1024);
  353. return;
  354. }
  355. longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size));
  356. cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
  357. cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
  358. longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
  359. cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
  360. /* just support dmesg right now */
  361. cxt->dev.flags = PSTORE_FLAGS_DMESG;
  362. cxt->dev.zone.read = mtdpstore_read;
  363. cxt->dev.zone.write = mtdpstore_write;
  364. cxt->dev.zone.erase = mtdpstore_erase;
  365. cxt->dev.zone.panic_write = mtdpstore_panic_write;
  366. cxt->dev.zone.total_size = mtd->size;
  367. ret = register_pstore_device(&cxt->dev);
  368. if (ret) {
  369. dev_err(&mtd->dev, "mtd%d register to psblk failed\n",
  370. mtd->index);
  371. return;
  372. }
  373. cxt->mtd = mtd;
  374. dev_info(&mtd->dev, "Attached to MTD device %d\n", mtd->index);
  375. }
  376. static int mtdpstore_flush_removed_do(struct mtdpstore_context *cxt,
  377. loff_t off, size_t size)
  378. {
  379. struct mtd_info *mtd = cxt->mtd;
  380. u_char *buf;
  381. int ret;
  382. size_t retlen;
  383. struct erase_info erase;
  384. buf = kmalloc(mtd->erasesize, GFP_KERNEL);
  385. if (!buf)
  386. return -ENOMEM;
  387. /* 1st. read to cache */
  388. ret = mtd_read(mtd, off, mtd->erasesize, &retlen, buf);
  389. if (mtdpstore_is_io_error(ret))
  390. goto free;
  391. /* 2nd. erase block */
  392. erase.len = mtd->erasesize;
  393. erase.addr = off;
  394. ret = mtd_erase(mtd, &erase);
  395. if (ret)
  396. goto free;
  397. /* 3rd. write back */
  398. while (size) {
  399. unsigned int zonesize = cxt->info.kmsg_size;
  400. /* there is valid data on block, write back */
  401. if (mtdpstore_is_used(cxt, off)) {
  402. ret = mtd_write(mtd, off, zonesize, &retlen, buf);
  403. if (ret)
  404. dev_err(&mtd->dev, "write failure at %lld (%zu of %u written), err %d\n",
  405. off, retlen, zonesize, ret);
  406. }
  407. off += zonesize;
  408. size -= min_t(unsigned int, zonesize, size);
  409. }
  410. free:
  411. kfree(buf);
  412. return ret;
  413. }
  414. /*
  415. * What does mtdpstore_flush_removed() do?
  416. * When user remove any log file on pstore filesystem, mtdpstore should do
  417. * something to ensure log file removed. If the whole block is no longer used,
  418. * it's nice to erase the block. However if the block still contains valid log,
  419. * what mtdpstore can do is to erase and write the valid log back.
  420. */
  421. static int mtdpstore_flush_removed(struct mtdpstore_context *cxt)
  422. {
  423. struct mtd_info *mtd = cxt->mtd;
  424. int ret;
  425. loff_t off;
  426. u32 blkcnt = (u32)div_u64(mtd->size, mtd->erasesize);
  427. for (off = 0; blkcnt > 0; blkcnt--, off += mtd->erasesize) {
  428. ret = mtdpstore_block_isbad(cxt, off);
  429. if (ret)
  430. continue;
  431. ret = mtdpstore_block_is_removed(cxt, off);
  432. if (!ret)
  433. continue;
  434. ret = mtdpstore_flush_removed_do(cxt, off, mtd->erasesize);
  435. if (ret)
  436. return ret;
  437. }
  438. return 0;
  439. }
  440. static void mtdpstore_notify_remove(struct mtd_info *mtd)
  441. {
  442. struct mtdpstore_context *cxt = &oops_cxt;
  443. if (mtd->index != cxt->index || cxt->index < 0)
  444. return;
  445. mtdpstore_flush_removed(cxt);
  446. unregister_pstore_device(&cxt->dev);
  447. kfree(cxt->badmap);
  448. kfree(cxt->usedmap);
  449. kfree(cxt->rmmap);
  450. cxt->mtd = NULL;
  451. cxt->index = -1;
  452. }
/* Watch MTD hotplug so we can attach to/detach from the configured device. */
static struct mtd_notifier mtdpstore_notifier = {
	.add = mtdpstore_notify_add,
	.remove = mtdpstore_notify_remove,
};
/*
 * Module init: fetch the pstore/blk configuration, validate it, and
 * register the MTD notifier.  The actual attach happens later in
 * mtdpstore_notify_add() when the configured device shows up.
 */
static int __init mtdpstore_init(void)
{
	int ret;
	struct mtdpstore_context *cxt = &oops_cxt;
	struct pstore_blk_config *info = &cxt->info;

	ret = pstore_blk_get_config(info);
	if (unlikely(ret))
		return ret;

	if (strlen(info->device) == 0) {
		pr_err("mtd device must be supplied (device name is empty)\n");
		return -EINVAL;
	}
	if (!info->kmsg_size) {
		pr_err("no backend enabled (kmsg_size is 0)\n");
		return -EINVAL;
	}

	/* Setup the MTD device to use */
	/* Numeric device strings select by index; names match in notify_add */
	ret = kstrtoint((char *)info->device, 0, &cxt->index);
	if (ret)
		cxt->index = -1;

	register_mtd_user(&mtdpstore_notifier);
	return 0;
}
module_init(mtdpstore_init);
/*
 * Module exit: dropping the notifier triggers mtdpstore_notify_remove()
 * for the attached device, which performs all remaining cleanup.
 */
static void __exit mtdpstore_exit(void)
{
	unregister_mtd_user(&mtdpstore_notifier);
}
module_exit(mtdpstore_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("WeiXiong Liao <[email protected]>");
MODULE_DESCRIPTION("MTD backend for pstore/blk");