jfs_metapage.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) International Business Machines Corp., 2000-2005
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}

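/*
 * Sleep until the metapage lock is released.  The caller holds
 * mp->page locked; the page lock is dropped around io_schedule() so
 * the lock holder can make progress, then retaken before the
 * META_locked bit is retested.
 */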
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);

	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1
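/*
 * When PAGE_SIZE exceeds the metapage size (PSIZE), several metapages
 * share one page.  The meta_anchor hangs off page_private() and tracks
 * the metapages within the page plus the number of in-flight I/Os
 * against it.
 */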
struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}

static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}

#else
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)
#endif

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

	if (mp) {
		mp->lid = 0;
		mp->lsn = 0;
		mp->data = NULL;
		mp->clsn = 0;
		mp->log = NULL;
		init_waitqueue_head(&mp->wait);
	}
	return mp;
}

static inline void free_metapage(struct metapage *mp)
{
	mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, NULL);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}

static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}

/*
 * Metapage address space operations
 */

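/*
 * Map a logical block of the inode to an on-disk address via xtLookup(),
 * clamping *len to the blocks remaining in the file.  The lookup is
 * skipped when i_ino == 0 (the direct, block-device inode), whose
 * blocks already map 1:1 to disk.  Returns 0 if there is no mapping.
 */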
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}

static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);
}

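/*
 * Take the metapage off the log's synclist.  Safe to call with a stale
 * mp->log: mp->lsn is rechecked under the logsync lock before the list
 * entry is removed.
 */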
static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
	/*
	 * This can race.  Recheck that log hasn't been set to null, and after
	 * acquiring logsync lock, recheck lsn
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
}

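/*
 * Write out the dirty metapages in a page.  Runs that are contiguous
 * both in memory and on disk are coalesced into a single bio.  A
 * metapage pinned by nohomeok causes the page to be redirtied instead,
 * and the journal is kicked so the page is not blocked indefinitely.
 */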
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;
	int bad_blocks = 0;

	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		set_bit(META_io, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				continue;
			}
			/* Not contiguous */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(page);
			if (!bio->bi_iter.bi_size)
				goto dump_bio;
			submit_bio(bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(page);
		xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			/*
			 * We already called inc_io(), but can't cancel it
			 * with dec_io() until we're done with the page
			 */
			bad_blocks++;
			continue;
		}
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_iter.bi_size)
			goto dump_bio;

		submit_bio(bio);
		nr_underway++;
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	if (bad_blocks)
		goto err_out;

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);
err_out:
	while (bad_blocks--)
		dec_io(page, last_write_complete);
	return -EIO;
}

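/*
 * Read a folio of metadata.  Each contiguous mapped extent gets its own
 * bio; unmapped blocks are skipped.  If any I/O was submitted, the
 * folio is unlocked by last_read_complete() when the final bio ends.
 */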
static int metapage_read_folio(struct file *fp, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = i_blocks_per_page(inode, page);
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(bio);

			bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
					GFP_NOFS);
			bio->bi_iter.bi_sector =
				pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}

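/*
 * Free the metapages attached to a folio, provided none is still
 * referenced, dirty, or pinned by nohomeok.  Returns false if any
 * metapage is still in use, telling the VM the folio cannot be freed.
 */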
static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct metapage *mp;
	bool ret = true;
	int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(&folio->page, offset);

		if (!mp)
			continue;

		jfs_info("metapage_release_folio: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = false;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(&folio->page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}

static void metapage_invalidate_folio(struct folio *folio, size_t offset,
				      size_t length)
{
	BUG_ON(offset || length < folio_size(folio));

	BUG_ON(folio_test_writeback(folio));

	metapage_release_folio(folio, 0);
}

const struct address_space_operations jfs_metapage_aops = {
	.read_folio	= metapage_read_folio,
	.writepage	= metapage_writepage,
	.release_folio	= metapage_release_folio,
	.invalidate_folio = metapage_invalidate_folio,
	.dirty_folio	= filemap_dirty_folio,
};

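/*
 * Look up (or create) the metapage covering lblock.  'absolute' selects
 * the block device's direct mapping rather than the inode's; 'new'
 * zero-fills the metapage and, when a metapage spans a whole page,
 * avoids reading it from disk.  On success the metapage is returned
 * locked with its reference count raised; NULL is returned on error.
 */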
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "get_mp->logical_size != size\n");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "using a discarded metapage\n");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		if (!mp)
			goto unlock;
		mp->page = page;
		mp->sb = inode->i_sb;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}

void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	get_page(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}

void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;

	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	get_page(page);
	lock_page(page);
	set_page_dirty(page);
	if (write_one_page(page))
		jfs_error(mp->sb, "write_one_page() failed\n");
	clear_bit(META_forcewrite, &mp->flag);
	put_page(page);
}

void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	get_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}

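/*
 * Drop a reference to the metapage.  On the final put, a dirty
 * metapage is pushed to the page cache (synchronously if META_sync is
 * set), and the metapage is freed if nothing else pins it.
 */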
void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;

	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		put_page(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			if (write_one_page(page))
				jfs_error(mp->sb, "write_one_page() failed\n");
			lock_page(page); /* write_one_page unlocks the page */
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);

	unlock_page(page);
	put_page(page);
}

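/*
 * Mark every metapage in the block range [addr, addr + len) for
 * discard: clear the dirty bit and detach it from the logsync list so
 * the stale metadata is never written back.
 */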
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		put_page(page);
	}
}

#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		   "JFS Metapage statistics\n"
		   "=======================\n"
		   "page allocations = %d\n"
		   "page frees = %d\n"
		   "lock waits = %d\n",
		   mpStat.pagealloc,
		   mpStat.pagefree,
		   mpStat.lockwait);
	return 0;
}
#endif