// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}
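/*
 * gfs2_withdraw_delayed() only marks the filesystem for withdrawal; the
 * actual withdraw happens later, once it is safe to do so.  Hence the
 * buffer and glock state are logged in full here, while they are still
 * available.
 */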
/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}
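/*
 * In the fsync case, buffers that are still dirty, pinned or locked are
 * simply skipped (they will be revoked on a later flush); in all other
 * cases a buffer in any of those states is an AIL error and triggers the
 * delayed withdraw above.
 */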
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret;

	revokes = atomic_read(&gl->gl_ail_count);
	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the ail, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * io outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret)
		goto flush;
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}
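/*
 * Note the on-stack transaction above: TR_ONSTACK tells
 * __gfs2_trans_begin() that @tr lives on the caller's stack rather than
 * being allocated, so the transaction teardown must not free it.  This
 * keeps the common revokes-only transaction cheap.
 */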
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}
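/*
 * Usage sketch (illustrative; the real call sites live elsewhere, e.g.
 * in the fsync path): a caller that cannot guarantee clean buffers
 * passes fsync == true so that busy buffers are skipped instead of
 * being reported as AIL errors:
 *
 *	gfs2_ail_flush(ip->i_gl, true);
 */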
/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */
static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}
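/*
 * Example of the range arithmetic above (illustrative numbers): with a
 * 4096-byte block size and 4096-byte pages, a resource group at
 * rd_addr = 17 with rd_length = 4 syncs the byte range
 *
 *	start = (17 * 4096) & PAGE_MASK         = 69632
 *	end   = PAGE_ALIGN((17 + 4) * 4096) - 1 = 86015
 *
 * i.e. the page-aligned span covering all of the rgrp's header blocks
 * in the per-sb metadata address space.
 */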
/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}
/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
}
static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);
	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}
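/*
 * GIF_GLOP_PENDING pairing (sketch): gfs2_glock2inode() sets the bit
 * while a glock operation is using the inode; gfs2_clear_glop_pending()
 * clears it with release semantics and wakes any waiter.  A waiter
 * blocks with the standard bit-wait pattern, e.g.:
 *
 *	wait_on_bit(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
 */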
/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}
/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}
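/*
 * Ordering in inode_go_sync(): the journal is flushed first so all
 * metadata for this glock is safe in the log, metadata writeback is then
 * started asynchronously, data pages are written and waited for, and
 * only afterwards is the metadata waited for (inside
 * gfs2_inode_metasync()) and the AIL emptied.  Starting the metadata
 * writeback early lets the metadata and data I/O overlap.
 */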
/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}
/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	struct inode *inode = &ip->i_inode;
	bool is_new = inode->i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	if (unlikely(!is_new && inode_wrong_type(inode, mode)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	inode->i_mode = mode;
	if (is_new) {
		inode->i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
					      be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(inode, be32_to_cpu(str->di_uid));
	i_gid_write(inode, be32_to_cpu(str->di_gid));
	set_nlink(inode, be32_to_cpu(str->di_nlink));
	i_size_write(inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&inode->i_atime, &atime) < 0)
		inode->i_atime = atime;
	inode->i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	inode->i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	inode->i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	inode->i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > sdp->sd_max_height))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip))
		goto corrupt;
	if (S_ISREG(inode->i_mode))
		gfs2_set_aops(inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}
/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}
/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */
static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	return gfs2_inode_refresh(ip);
}
static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}
/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */
static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode), nrpages);
}
/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */
static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE.  That's because when any node does a freeze,
	 * all the nodes should have the freeze glock in SH mode and they all
	 * call do_xmote: One for EX and the others for UN.  They ALL must
	 * freeze locally, and they ALL must queue freeze work.  The freeze_work
	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
	 * effectively waiting for the thaw on the node that holds it in EX.
	 * Once thawed, the work func acquires the freeze glock in
	 * SH and everybody goes back to thawed.
	 */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}
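/*
 * Freeze state transitions seen above (per superblock, in
 * sd_freeze_state): SFS_UNFROZEN -> SFS_STARTING_FREEZE when the glock
 * drops to SH and freeze_super() is attempted.  Read-only mounts go
 * straight to SFS_FROZEN here; mounts with a live journal reach it via
 * the GFS2_LOG_HEAD_FLUSH_FREEZE log flush.  On a failed freeze of an
 * already-withdrawn filesystem, the state returns to SFS_UNFROZEN.
 */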
/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}
/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */
static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}
/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}

static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gfs2_delete_work_queued(gl);
}
/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock.  In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}
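/*
 * Wait-side sketch (illustrative; the actual waiter lives in the
 * withdraw path): GLF_FREEING is set on the journal glock before it is
 * unlocked, and the withdrawing node then blocks until inode_go_free()
 * runs:
 *
 *	if (test_bit(GLF_FREEING, &gl->gl_flags))
 *		wait_on_bit(&gl->gl_flags, GLF_FREEING,
 *			    TASK_UNINTERRUPTIBLE);
 */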
/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	 * live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
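/*
 * Usage sketch (illustrative): given a lock name received from the lock
 * manager, the matching operations vector can be looked up by type:
 *
 *	const struct gfs2_glock_operations *glops =
 *		gfs2_glops_list[ln_type];
 *
 * where ln_type is one of the LM_TYPE_* constants indexing the array.
 */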