buffer_head_io.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * io.c
 *
 * Buffer cache handling
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/bio.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "uptodate.h"
#include "buffer_head_io.h"

#include "ocfs2_trace.h"

/*
 * Bits on bh->b_state used by ocfs2.
 *
 * These MUST be after the JBD2 bits. Hence, we use BH_JBDPrivateStart.
 */
enum ocfs2_state_bits {
	BH_NeedsValidate = BH_JBDPrivateStart,
};

/* Expand the magic b_state functions */
BUFFER_FNS(NeedsValidate, needs_validate);

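/*
 * Note: BUFFER_FNS(NeedsValidate, needs_validate) expands into the helpers
 * used later in this file: set_buffer_needs_validate(),
 * clear_buffer_needs_validate() and buffer_needs_validate().
 */
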
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct ocfs2_caching_info *ci)
{
	int ret = 0;

	trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here: non-journalled
	 * writes are only ever done on system files, which can get modified
	 * during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	ocfs2_metadata_cache_io_lock(ci);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(ci, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		mlog_errno(ret);
	}

	ocfs2_metadata_cache_io_unlock(ci);

out:
	return ret;
}

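/*
 * Usage sketch (hypothetical caller, not taken from this file): a
 * non-journalled write of a system-file block might look like
 *
 *	ret = ocfs2_write_block(osb, bh, ci);
 *	if (ret < 0)
 *		mlog_errno(ret);
 *
 * where ci is the ocfs2_caching_info of the owning object. The write is
 * synchronous, and on success the buffer is marked uptodate in the metadata
 * cache via ocfs2_set_buffer_uptodate().
 */
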
/* Caller must provide a bhs[] with all NULL or all non-NULL entries, so
 * that read failures are easier to handle.
 */
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
			   unsigned int nr, struct buffer_head *bhs[])
{
	int status = 0;
	unsigned int i;
	struct buffer_head *bh;
	int new_bh = 0;

	trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);

	if (!nr)
		goto bail;

	/* Don't put a buffer head and re-assign it to NULL if it was
	 * allocated outside, since the caller cannot be aware of this
	 * alteration!
	 */
	new_bh = (bhs[0] == NULL);

	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(osb->sb, block++);
			if (bhs[i] == NULL) {
				status = -ENOMEM;
				mlog_errno(status);
				break;
			}
		}
		bh = bhs[i];

		if (buffer_jbd(bh)) {
			trace_ocfs2_read_blocks_sync_jbd(
					(unsigned long long)bh->b_blocknr);
			continue;
		}

		if (buffer_dirty(bh)) {
			/* This should probably be a BUG, or
			 * at least return an error. */
			mlog(ML_ERROR,
			     "trying to sync read a dirty "
			     "buffer! (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		lock_buffer(bh);
		if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
			mlog(ML_ERROR,
			     "block %llu had the JBD bit set "
			     "while I was in lock_buffer!",
			     (unsigned long long)bh->b_blocknr);
			BUG();
#else
			unlock_buffer(bh);
			continue;
#endif
		}

		get_bh(bh); /* for end_buffer_read_sync() */
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, bh);
	}

read_failure:
	for (i = nr; i > 0; i--) {
		bh = bhs[i - 1];

		if (unlikely(status)) {
			if (new_bh && bh) {
				/* If a middle bh fails, let the previous bh
				 * finish its read and then put it to
				 * avoid a bh leak
				 */
				if (!buffer_jbd(bh))
					wait_on_buffer(bh);
				put_bh(bh);
				bhs[i - 1] = NULL;
			} else if (bh && buffer_uptodate(bh)) {
				clear_buffer_uptodate(bh);
			}
			continue;
		}

		/* No need to wait on the buffer if it's managed by JBD. */
		if (!buffer_jbd(bh))
			wait_on_buffer(bh);

		if (!buffer_uptodate(bh)) {
			/* Status won't be cleared from here on out,
			 * so we can safely record this and loop back
			 * to cleanup the other buffers. */
			status = -EIO;
			goto read_failure;
		}
	}

bail:
	return status;
}

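/*
 * Usage sketch (hypothetical caller, not taken from this file): reading a
 * single uncached block outside of any journalled path might look like
 *
 *	struct buffer_head *bh = NULL;
 *	int status;
 *
 *	status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
 *	if (status < 0)
 *		mlog_errno(status);
 *	else {
 *		... use bh->b_data ...
 *		brelse(bh);
 *	}
 *
 * Passing a NULL entry asks this function to allocate the buffer head with
 * sb_getblk(); on error such an entry is put and reset to NULL, per the
 * all-NULL / all-non-NULL rule above.
 */
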
/* Caller must provide a bhs[] with all NULL or all non-NULL entries, so
 * that read failures are easier to handle.
 */
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
		      struct buffer_head *bhs[], int flags,
		      int (*validate)(struct super_block *sb,
				      struct buffer_head *bh))
{
	int status = 0;
	int i, ignore_cache = 0;
	struct buffer_head *bh;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	int new_bh = 0;

	trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);

	BUG_ON(!ci);
	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
	       (flags & OCFS2_BH_IGNORE_CACHE));

	if (bhs == NULL) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr < 0) {
		mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr == 0) {
		status = 0;
		goto bail;
	}

	/* Don't put a buffer head and re-assign it to NULL if it was
	 * allocated outside, since the caller cannot be aware of this
	 * alteration!
	 */
	new_bh = (bhs[0] == NULL);

	ocfs2_metadata_cache_io_lock(ci);
	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(sb, block++);
			if (bhs[i] == NULL) {
				ocfs2_metadata_cache_io_unlock(ci);
				status = -ENOMEM;
				mlog_errno(status);
				/* Don't forget to put previous bh! */
				break;
			}
		}
		bh = bhs[i];
		ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);

		/* There are three read-ahead cases here which we need to
		 * be concerned with. All three assume a buffer has
		 * previously been submitted with OCFS2_BH_READAHEAD
		 * and it hasn't yet completed I/O.
		 *
		 * 1) The current request is sync to disk. This rarely
		 *    happens these days, and never when performance
		 *    matters - the code can just wait on the buffer
		 *    lock and re-submit.
		 *
		 * 2) The current request is cached, but not
		 *    readahead. ocfs2_buffer_uptodate() will return
		 *    false anyway, so we'll wind up waiting on the
		 *    buffer lock to do I/O. We re-check the request
		 *    after getting the lock to avoid a re-submit.
		 *
		 * 3) The current request is readahead (and so must
		 *    also be a caching one). We short circuit if the
		 *    buffer is locked (under I/O) and if it's in the
		 *    uptodate cache. The re-check from #2 catches the
		 *    case that the previous read-ahead completes just
		 *    before our is-it-in-flight check.
		 */
		if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
			trace_ocfs2_read_blocks_from_disk(
			     (unsigned long long)bh->b_blocknr,
			     (unsigned long long)ocfs2_metadata_cache_owner(ci));
			/* We're using ignore_cache here to say
			 * "go to disk" */
			ignore_cache = 1;
		}
		trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
			ignore_cache, buffer_jbd(bh), buffer_dirty(bh));

		if (buffer_jbd(bh)) {
			continue;
		}

		if (ignore_cache) {
			if (buffer_dirty(bh)) {
				/* This should probably be a BUG, or
				 * at least return an error. */
				continue;
			}

			/* A read-ahead request was made - if the
			 * buffer is already under read-ahead from a
			 * previously submitted request then we are
			 * done here. */
			if ((flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_read_ahead(ci, bh))
				continue;

			lock_buffer(bh);
			if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
				mlog(ML_ERROR, "block %llu had the JBD bit set "
					       "while I was in lock_buffer!",
				     (unsigned long long)bh->b_blocknr);
				BUG();
#else
				unlock_buffer(bh);
				continue;
#endif
			}

			/* Re-check ocfs2_buffer_uptodate() as a
			 * previously read-ahead buffer may have
			 * completed I/O while we were waiting for the
			 * buffer lock. */
			if (!(flags & OCFS2_BH_IGNORE_CACHE)
			    && !(flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_uptodate(ci, bh)) {
				unlock_buffer(bh);
				continue;
			}

			get_bh(bh); /* for end_buffer_read_sync() */
			if (validate)
				set_buffer_needs_validate(bh);
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(REQ_OP_READ, bh);
			continue;
		}
	}
read_failure:
	for (i = (nr - 1); i >= 0; i--) {
		bh = bhs[i];

		if (!(flags & OCFS2_BH_READAHEAD)) {
			if (unlikely(status)) {
				/* Clear the buffers on error, including those
				 * that ever succeeded in reading
				 */
				if (new_bh && bh) {
					/* If a middle bh fails, let the
					 * previous bh finish its read and then
					 * put it to avoid a bh leak
					 */
					if (!buffer_jbd(bh))
						wait_on_buffer(bh);
					put_bh(bh);
					bhs[i] = NULL;
				} else if (bh && buffer_uptodate(bh)) {
					clear_buffer_uptodate(bh);
				}
				continue;
			}

			/* We know this can't have changed as we hold the
			 * owner sem. Avoid doing any work on the bh if the
			 * journal has it. */
			if (!buffer_jbd(bh))
				wait_on_buffer(bh);

			if (!buffer_uptodate(bh)) {
				/* Status won't be cleared from here on out,
				 * so we can safely record this and loop back
				 * to cleanup the other buffers. Don't need to
				 * remove the clustered uptodate information
				 * for this bh as it's not marked locally
				 * uptodate. */
				status = -EIO;
				clear_buffer_needs_validate(bh);
				goto read_failure;
			}

			if (buffer_needs_validate(bh)) {
				/* We never set NeedsValidate if the
				 * buffer was held by the journal, so
				 * that better not have changed */
				BUG_ON(buffer_jbd(bh));
				clear_buffer_needs_validate(bh);
				status = validate(sb, bh);
				if (status)
					goto read_failure;
			}
		}

		/* Always set the buffer in the cache, even if it was
		 * a forced read, or read-ahead which hasn't yet
		 * completed. */
		ocfs2_set_buffer_uptodate(ci, bh);
	}

	ocfs2_metadata_cache_io_unlock(ci);

	trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
				    flags, ignore_cache);

bail:
	return status;
}

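/*
 * Usage sketch (hypothetical caller and validator, not taken from this
 * file): reading one cached metadata block with validation might look like
 *
 *	static int my_validate(struct super_block *sb, struct buffer_head *bh)
 *	{
 *		return my_checks_pass(bh->b_data) ? 0 : -EIO;
 *	}
 *
 *	struct buffer_head *bh = NULL;
 *	int status = ocfs2_read_blocks(ci, blkno, 1, &bh, 0, my_validate);
 *
 * The validate callback only runs on buffers that were actually read from
 * disk (NeedsValidate is set just before submit_bh() above); buffers served
 * from the uptodate cache or held by the journal are never re-validated.
 */
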
/* Check whether the blkno is the super block or one of the backups. */
static void ocfs2_check_super_or_backup(struct super_block *sb,
					sector_t blkno)
{
	int i;
	u64 backup_blkno;

	if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
		return;

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		backup_blkno = ocfs2_backup_super_blkno(sb, i);
		if (backup_blkno == blkno)
			return;
	}

	BUG();
}

/*
 * Writing the super block and its backups doesn't need to collaborate with
 * the journal, so we don't need to lock ip_io_mutex and ci doesn't need to
 * be passed into this function.
 */
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
				struct buffer_head *bh)
{
	int ret = 0;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;

	BUG_ON(buffer_jbd(bh));
	ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	}

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
	submit_bh(REQ_OP_WRITE, bh);

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		ret = -EIO;
		mlog_errno(ret);
	}

out:
	return ret;
}
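
/*
 * Usage sketch (hypothetical caller, not taken from this file): after
 * updating fields of an in-memory super block buffer, a caller might flush
 * it (and likewise each backup location) with
 *
 *	ret = ocfs2_write_super_or_backup(osb, bh);
 *	if (ret < 0)
 *		mlog_errno(ret);
 *
 * The buffer must map the super block or one of the backup locations;
 * anything else trips the BUG() in ocfs2_check_super_or_backup().
 */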