xfs_mount.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_reflink.h"
#include "xfs_extent_busy.h"
#include "xfs_health.h"
#include "xfs_trace.h"
#include "xfs_ag.h"

static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;
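
/* Free the global table used to track the UUIDs of mounted XFS filesystems. */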
void
xfs_uuid_table_free(void)
{
	if (xfs_uuid_table_size == 0)
		return;
	kmem_free(xfs_uuid_table);
	xfs_uuid_table = NULL;
	xfs_uuid_table_size = 0;
}

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	/* Publish UUID in struct super_block */
	uuid_copy(&mp->m_super->s_uuid, uuid);

	if (xfs_has_nouuid(mp))
		return 0;

	if (uuid_is_null(uuid)) {
		xfs_warn(mp, "Filesystem has null UUID - can't mount");
		return -EINVAL;
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}
	if (hole < 0) {
		xfs_uuid_table = krealloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			GFP_KERNEL | __GFP_NOFAIL);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);
	return 0;

out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return -EINVAL;
}

STATIC void
xfs_uuid_unmount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			i;

	if (xfs_has_nouuid(mp))
		return;

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i]))
			continue;
		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
			continue;
		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
		break;
	}
	ASSERT(i < xfs_uuid_table_size);
	mutex_unlock(&xfs_uuid_table_mutex);
}

/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

	/* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return -EFBIG;
	return 0;
}

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
	struct xfs_mount *mp,
	int		flags)
{
	unsigned int	sector_size;
	struct xfs_buf	*bp;
	struct xfs_sb	*sbp = &mp->m_sb;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);
	const struct xfs_buf_ops *buf_ops;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * For the initial read, we must guess at the sector
	 * size based on the block device.  It's enough to
	 * get the sb_sectsize out of the superblock and
	 * then reread with the proper length.
	 * We don't verify it yet, because it may not be complete.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	buf_ops = NULL;

	/*
	 * Allocate a (locked) buffer to hold the superblock. This will be kept
	 * around at all times to optimize access to the superblock. Therefore,
	 * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
	 * elevated.
	 */
reread:
	error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				      BTOBB(sector_size), XBF_NO_IOACCT, &bp,
				      buf_ops);
	if (error) {
		if (loud)
			xfs_warn(mp, "SB validate failed with error %d.", error);
		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(sbp, bp->b_addr);

	/*
	 * If we haven't validated the superblock, do so now before we try
	 * to check the sector size and reread the superblock appropriately.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		if (loud)
			xfs_warn(mp, "Invalid superblock magic number");
		error = -EINVAL;
		goto release_buf;
	}

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > sbp->sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, sbp->sb_sectsize);
		error = -ENOSYS;
		goto release_buf;
	}

	if (buf_ops == NULL) {
		/*
		 * Re-read the superblock so the buffer is correctly sized,
		 * and properly verified.
		 */
		xfs_buf_relse(bp);
		sector_size = sbp->sb_sectsize;
		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
		goto reread;
	}

	mp->m_features |= xfs_sb_version_to_features(sbp);
	xfs_reinit_percpu_counters(mp);

	/* no need to be quiet anymore, so reset the buf ops */
	bp->b_ops = &xfs_sb_buf_ops;

	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}

/*
 * If the sunit/swidth change would move the precomputed root inode value, we
 * must reject the ondisk change because repair will stumble over that.
 * However, we allow the mount to proceed because we never rejected this
 * combination before.  Returns true to update the sb, false otherwise.
 */
static inline int
xfs_check_new_dalign(
	struct xfs_mount	*mp,
	int			new_dalign,
	bool			*update_sb)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	xfs_ino_t		calc_ino;

	calc_ino = xfs_ialloc_calc_rootino(mp, new_dalign);
	trace_xfs_check_new_dalign(mp, new_dalign, calc_ino);

	if (sbp->sb_rootino == calc_ino) {
		*update_sb = true;
		return 0;
	}

	xfs_warn(mp,
"Cannot change stripe alignment; would require moving root inode.");

	/*
	 * XXX: Next time we add a new incompat feature, this should start
	 * returning -EINVAL to fail the mount.  Until then, spit out a warning
	 * that we're ignoring the administrator's instructions.
	 */
	xfs_warn(mp, "Skipping superblock stripe alignment update.");
	*update_sb = false;
	return 0;
}

/*
 * If we were provided with new sunit/swidth values as mount options, make sure
 * that they pass basic alignment and superblock feature checks, and convert
 * them into the same units (FSB) that everything else expects.  This step
 * /must/ be done before computing the inode geometry.
 */
STATIC int
xfs_validate_new_dalign(
	struct xfs_mount	*mp)
{
	if (mp->m_dalign == 0)
		return 0;

	/*
	 * If stripe unit and stripe width are not multiples
	 * of the fs blocksize turn off alignment.
	 */
	if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
	    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
		xfs_warn(mp,
	"alignment check failed: sunit/swidth vs. blocksize(%d)",
			mp->m_sb.sb_blocksize);
		return -EINVAL;
	}

	/*
	 * Convert the stripe unit and width to FSBs.
	 */
	mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
	if (mp->m_dalign && (mp->m_sb.sb_agblocks % mp->m_dalign)) {
		xfs_warn(mp,
	"alignment check failed: sunit/swidth vs. agsize(%d)",
			mp->m_sb.sb_agblocks);
		return -EINVAL;
	}

	if (!mp->m_dalign) {
		xfs_warn(mp,
	"alignment check failed: sunit(%d) less than bsize(%d)",
			mp->m_dalign, mp->m_sb.sb_blocksize);
		return -EINVAL;
	}

	mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);

	if (!xfs_has_dalign(mp)) {
		xfs_warn(mp,
"cannot change alignment: superblock does not support data alignment");
		return -EINVAL;
	}

	return 0;
}

/* Update alignment values based on mount options and sb values. */
STATIC int
xfs_update_alignment(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;

	if (mp->m_dalign) {
		bool		update_sb;
		int		error;

		if (sbp->sb_unit == mp->m_dalign &&
		    sbp->sb_width == mp->m_swidth)
			return 0;

		error = xfs_check_new_dalign(mp, mp->m_dalign, &update_sb);
		if (error || !update_sb)
			return error;

		sbp->sb_unit = mp->m_dalign;
		sbp->sb_width = mp->m_swidth;
		mp->m_update_sb = true;
	} else if (!xfs_has_noalign(mp) && xfs_has_dalign(mp)) {
		mp->m_dalign = sbp->sb_unit;
		mp->m_swidth = sbp->sb_width;
	}

	return 0;
}

/*
 * precalculate the low space thresholds for dynamic speculative preallocation.
 */
void
xfs_set_low_space_thresholds(
	struct xfs_mount	*mp)
{
	uint64_t		dblocks = mp->m_sb.sb_dblocks;
	uint64_t		rtexts = mp->m_sb.sb_rextents;
	int			i;

	do_div(dblocks, 100);
	do_div(rtexts, 100);
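
	/* Slot i ends up holding (i + 1)% of the data blocks / rt extents. */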
	for (i = 0; i < XFS_LOWSP_MAX; i++) {
		mp->m_low_space[i] = dblocks * (i + 1);
		mp->m_low_rtexts[i] = rtexts * (i + 1);
	}
}

/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(
	struct xfs_mount *mp)
{
	struct xfs_buf	*bp;
	xfs_daddr_t	d;
	int		error;
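
	/*
	 * The FSB-to-daddr conversion of the block count must round-trip;
	 * otherwise the last block is not addressable.  Then read the last
	 * sector to confirm the device really is that large.
	 */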
	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		xfs_warn(mp, "filesystem size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "last sector read failed");
		return error;
	}
	xfs_buf_relse(bp);

	if (mp->m_logdev_targp == mp->m_ddev_targp)
		return 0;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
		xfs_warn(mp, "log size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "log device read failed");
		return error;
	}
	xfs_buf_relse(bp);
	return 0;
}

/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
	mp->m_qflags = 0;

	/* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
	if (mp->m_sb.sb_qflags == 0)
		return 0;
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return 0;

	return xfs_sync_sb(mp, false);
}

uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
	uint64_t resblks;

	/*
	 * We default to 5% or 8192 fsbs of space reserved, whichever is
	 * smaller.  This is intended to cover concurrent allocation
	 * transactions when we initially hit enospc.  These each require a 4
	 * block reservation.  Hence by default we cover roughly 2000 concurrent
	 * allocation reservations.
	 */
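	/*
	 * Worked example (hypothetical geometry): a 1 TiB data device with
	 * 4 KiB blocks has ~268M blocks; 5% of that is ~13.4M blocks, so the
	 * 8192-block cap applies and the pool defaults to 8192 blocks (32 MiB).
	 */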
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(uint64_t, resblks, 8192);
	return resblks;
}

/* Ensure the summary counts are correct. */
STATIC int
xfs_check_summary_counts(
	struct xfs_mount	*mp)
{
	int			error = 0;

	/*
	 * The AG0 superblock verifier rejects in-progress filesystems,
	 * so we should never see the flag set this far into mounting.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_err(mp, "sb_inprogress set after log recovery??");
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	/*
	 * Now that the log is mounted, we know if it was an unclean shutdown
	 * or not.  If it was, the first phase of recovery has completed and we
	 * have consistent AG blocks on disk.  We have not recovered EFIs yet,
	 * but they are recovered transactionally in the second recovery phase
	 * later.
	 *
	 * If the log was clean when we mounted, we can check the summary
	 * counters.  If any of them are obviously incorrect, we can recompute
	 * them from the AGF headers in the next step.
	 */
	if (xfs_is_clean(mp) &&
	    (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
	     !xfs_verify_icount(mp, mp->m_sb.sb_icount) ||
	     mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
		xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);

	/*
	 * We can safely re-initialise incore superblock counters from the
	 * per-ag data.  These may not be correct if the filesystem was not
	 * cleanly unmounted, so we waited for recovery to finish before doing
	 * this.
	 *
	 * If the filesystem was cleanly unmounted or the previous check did
	 * not flag anything weird, then we can trust the values in the
	 * superblock to be correct and we don't need to do anything here.
	 * Otherwise, recalculate the summary counters.
	 */
	if ((xfs_has_lazysbcount(mp) && !xfs_is_clean(mp)) ||
	    xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS)) {
		error = xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
		if (error)
			return error;
	}

	/*
	 * Older kernels misused sb_frextents to reflect both incore
	 * reservations made by running transactions and the actual count of
	 * free rt extents in the ondisk metadata.  Transactions committed
	 * during runtime can therefore contain a superblock update that
	 * undercounts the number of free rt extents tracked in the rt bitmap.
	 * A clean unmount record will have the correct frextents value since
	 * there can be no other transactions running at that point.
	 *
	 * If we're mounting the rt volume after recovering the log, recompute
	 * frextents from the rtbitmap file to fix the inconsistency.
	 */
	if (xfs_has_realtime(mp) && !xfs_is_clean(mp)) {
		error = xfs_rtalloc_reinit_frextents(mp);
		if (error)
			return error;
	}

	return 0;
}

/*
 * Flush and reclaim dirty inodes in preparation for unmount. Inodes and
 * internal inode structures can be sitting in the CIL and AIL at this point,
 * so we need to unpin them, write them back and/or reclaim them before unmount
 * can proceed.  In other words, callers are required to have inactivated all
 * inodes.
 *
 * An inode cluster that has been freed can have its buffer still pinned in
 * memory because the transaction is still sitting in an iclog. The stale
 * inodes on that buffer will be pinned to the buffer until the transaction
 * hits the disk and the callbacks run. Pushing the AIL will skip the stale
 * inodes and may never see the pinned buffer, so nothing will push out the
 * iclog and unpin the buffer.
 *
 * Hence we need to force the log to unpin everything first. However, log
 * forces don't wait for the discards they issue to complete, so we have to
 * explicitly wait for them to complete here as well.
 *
 * Then we can tell the world we are unmounting so that error handling knows
 * that the filesystem is going away and we should error out anything that we
 * have been retrying in the background.  This will prevent never-ending
 * retries in AIL pushing from hanging the unmount.
 *
 * Finally, we can push the AIL to clean all the remaining dirty objects, then
 * reclaim the remaining inodes that are still in memory at this point in time.
 */
static void
xfs_unmount_flush_inodes(
	struct xfs_mount	*mp)
{
	xfs_log_force(mp, XFS_LOG_SYNC);
	xfs_extent_busy_wait_all(mp);
	flush_workqueue(xfs_discard_wq);

	set_bit(XFS_OPSTATE_UNMOUNTING, &mp->m_opstate);

	xfs_ail_push_all_sync(mp->m_ail);
	xfs_inodegc_stop(mp);
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp);
	xfs_health_unmount(mp);
}
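
/*
 * Precompute the inode geometry for this mount: the attr fork offset plus the
 * inode allocation and cluster geometry.
 */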
static void
xfs_mount_setup_inode_geom(
	struct xfs_mount	*mp)
{
	struct xfs_ino_geometry *igeo = M_IGEO(mp);

	igeo->attr_fork_offset = xfs_bmap_compute_attr_offset(mp);
	ASSERT(igeo->attr_fork_offset < XFS_LITINO(mp));

	xfs_ialloc_setup_geometry(mp);
}

/* Compute maximum possible height for per-AG btree types for this fs. */
static inline void
xfs_agbtree_compute_maxlevels(
	struct xfs_mount	*mp)
{
	unsigned int		levels;

	levels = max(mp->m_alloc_maxlevels, M_IGEO(mp)->inobt_maxlevels);
	levels = max(levels, mp->m_rmap_maxlevels);
	mp->m_agbtree_maxlevels = max(levels, mp->m_refc_maxlevels);
}

/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *		so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &(mp->m_sb);
	struct xfs_inode	*rip;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	uint64_t		resblks;
	uint			quotamount = 0;
	uint			quotaflags = 0;
	int			error = 0;

	xfs_sb_mount_common(mp, sbp);

	/*
	 * Check for a mismatched features2 values.  Older kernels read & wrote
	 * into the wrong sb offset for sb_features2 on some platforms due to
	 * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
	 * which made older superblock reading/writing routines swap it as a
	 * 64-bit value.
	 *
	 * For backwards compatibility, we make both slots equal.
	 *
	 * If we detect a mismatched field, we OR the set bits into the existing
	 * features2 field in case it has already been modified; we don't want
	 * to lose any features.  We then update the bad location with the ORed
	 * value so that older kernels will see any features2 flags.  The
	 * superblock writeback code ensures the new sb_features2 is copied to
	 * sb_bad_features2 before it is logged or written to disk.
	 */
	if (xfs_sb_has_mismatched_features2(sbp)) {
		xfs_warn(mp, "correcting sb_features alignment problem");
		sbp->sb_features2 |= sbp->sb_bad_features2;
		mp->m_update_sb = true;
	}

	/* always use v2 inodes by default now */
	if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
		mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
		mp->m_features |= XFS_FEAT_NLINK;
		mp->m_update_sb = true;
	}

	/*
	 * If we were given new sunit/swidth options, do some basic validation
	 * checks and convert the incore dalign and swidth values to the
	 * same units (FSB) that everything else uses.  This /must/ happen
	 * before computing the inode geometry.
	 */
	error = xfs_validate_new_dalign(mp);
	if (error)
		goto out;

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_mount_setup_inode_geom(mp);
	xfs_rmapbt_compute_maxlevels(mp);
	xfs_refcountbt_compute_maxlevels(mp);

	xfs_agbtree_compute_maxlevels(mp);

	/*
	 * Check if sb_agblocks is aligned at stripe boundary.  If sb_agblocks
	 * is NOT aligned turn off m_dalign since allocator alignment is within
	 * an ag, therefore ag has to be aligned at stripe boundary.  Note that
	 * we must compute the free space and rmap btree geometry before doing
	 * this.
	 */
	error = xfs_update_alignment(mp);
	if (error)
		goto out;

	/* enable fail_at_unmount as default */
	mp->m_fail_unmount = true;

	error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype,
			       NULL, mp->m_super->s_id);
	if (error)
		goto out;

	error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
			       &mp->m_kobj, "stats");
	if (error)
		goto out_remove_sysfs;

	error = xfs_error_sysfs_init(mp);
	if (error)
		goto out_del_stats;

	error = xfs_errortag_init(mp);
	if (error)
		goto out_remove_error_sysfs;

	error = xfs_uuid_mount(mp);
	if (error)
		goto out_remove_errortag;

	/*
	 * Update the preferred write size based on the information from the
	 * on-disk superblock.
	 */
	mp->m_allocsize_log =
		max_t(uint32_t, sbp->sb_blocklog, mp->m_allocsize_log);
	mp->m_allocsize_blocks = 1U << (mp->m_allocsize_log - sbp->sb_blocklog);
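
	/*
	 * For example, assuming 4 KiB filesystem blocks (sb_blocklog == 12)
	 * and a 64 KiB allocsize (m_allocsize_log == 16), m_allocsize_blocks
	 * works out to 16 blocks per preferred write.
	 */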

	/* set the low space thresholds for dynamic preallocation */
	xfs_set_low_space_thresholds(mp);

	/*
	 * If enabled, sparse inode chunk alignment is expected to match the
	 * cluster size. Full inode chunk alignment must match the chunk size,
	 * but that is checked on sb read verification...
	 */
	if (xfs_has_sparseinodes(mp) &&
	    mp->m_sb.sb_spino_align !=
			XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw)) {
		xfs_warn(mp,
	"Sparse inode block alignment (%u) must match cluster size (%llu).",
			 mp->m_sb.sb_spino_align,
			 XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw));
		error = -EINVAL;
		goto out_remove_uuid;
	}

	/*
	 * Check that the data (and log if separate) is an ok size.
	 */
	error = xfs_check_sizes(mp);
	if (error)
		goto out_remove_uuid;

	/*
	 * Initialize realtime fields in the mount structure
	 */
	error = xfs_rtmount_init(mp);
	if (error) {
		xfs_warn(mp, "RT mount failed");
		goto out_remove_uuid;
	}

	/*
	 * Copies the low order bits of the timestamp and the randomly
	 * set "sequence" number out of a UUID.
	 */
	mp->m_fixedfsid[0] =
		(get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
		 get_unaligned_be16(&sbp->sb_uuid.b[4]);
	mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);

	error = xfs_da_mount(mp);
	if (error) {
		xfs_warn(mp, "Failed dir/attr init: %d", error);
		goto out_remove_uuid;
	}

	/*
	 * Initialize the precomputed transaction reservations values.
	 */
	xfs_trans_init(mp);

	/*
	 * Allocate and initialize the per-ag data.
	 */
	error = xfs_initialize_perag(mp, sbp->sb_agcount, mp->m_sb.sb_dblocks,
			&mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed per-ag init: %d", error);
		goto out_free_dir;
	}

	if (XFS_IS_CORRUPT(mp, !sbp->sb_logblocks)) {
		xfs_warn(mp, "no log defined");
		error = -EFSCORRUPTED;
		goto out_free_perag;
	}

	error = xfs_inodegc_register_shrinker(mp);
	if (error)
		goto out_fail_wait;

	/*
	 * Log's mount-time initialization. The first part of recovery can place
	 * some items on the AIL, to be handled when recovery is finished or
	 * cancelled.
	 */
	error = xfs_log_mount(mp, mp->m_logdev_targp,
			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
	if (error) {
		xfs_warn(mp, "log mount failed");
		goto out_inodegc_shrinker;
	}

	/* Enable background inode inactivation workers. */
	xfs_inodegc_start(mp);
	xfs_blockgc_start(mp);

	/*
	 * Now that we've recovered any pending superblock feature bit
	 * additions, we can finish setting up the attr2 behaviour for the
	 * mount. The noattr2 option overrides the superblock flag, so only
	 * check the superblock feature flag if the mount option is not set.
	 */
	if (xfs_has_noattr2(mp)) {
		mp->m_features &= ~XFS_FEAT_ATTR2;
	} else if (!xfs_has_attr2(mp) &&
		   (mp->m_sb.sb_features2 & XFS_SB_VERSION2_ATTR2BIT)) {
		mp->m_features |= XFS_FEAT_ATTR2;
	}

	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_IGET_UNTRUSTED,
			 XFS_ILOCK_EXCL, &rip);
	if (error) {
		xfs_warn(mp,
			"Failed to read root inode 0x%llx, error %d",
			sbp->sb_rootino, -error);
		goto out_log_dealloc;
	}

	ASSERT(rip != NULL);

	if (XFS_IS_CORRUPT(mp, !S_ISDIR(VFS_I(rip)->i_mode))) {
		xfs_warn(mp, "corrupted root inode %llu: not a directory",
			(unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		error = -EFSCORRUPTED;
		goto out_rele_rip;
	}
	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	error = xfs_rtmount_inodes(mp);
	if (error) {
		/*
		 * Free up the root inode.
		 */
		xfs_warn(mp, "failed to read RT inodes");
		goto out_rele_rip;
	}

	/* Make sure the summary counts are ok. */
	error = xfs_check_summary_counts(mp);
	if (error)
		goto out_rtunmount;

	/*
	 * If this is a read-only mount defer the superblock updates until
	 * the next remount into writeable mode.  Otherwise we would never
	 * perform the update e.g. for the root filesystem.
	 */
	if (mp->m_update_sb && !xfs_is_readonly(mp)) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			goto out_rtunmount;
		}
	}

	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if (XFS_IS_QUOTA_ON(mp)) {
		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
		if (error)
			goto out_rtunmount;
	} else {
		/*
		 * If a file system had quotas running earlier, but decided to
		 * mount without -o uquota/pquota/gquota options, revoke the
		 * quotachecked license.
		 */
		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
			xfs_notice(mp, "resetting quota flags");
			error = xfs_mount_reset_sbqflags(mp);
			if (error)
				goto out_rtunmount;
		}
	}

	/*
	 * Finish recovering the file system.  This part needed to be delayed
	 * until after the root and real-time bitmap inodes were consistently
	 * read in.  Temporarily create per-AG space reservations for metadata
	 * btree shape changes because space freeing transactions (for inode
	 * inactivation) require the per-AG reservation in lieu of reserving
	 * blocks.
	 */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error == -ENOSPC)
		xfs_warn(mp,
	"ENOSPC reserving per-AG metadata pool, log recovery may fail.");
	error = xfs_log_mount_finish(mp);
	xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_warn(mp, "log mount finish failed");
		goto out_rtunmount;
	}

	/*
	 * Now the log is fully replayed, we can transition to full read-only
	 * mode for read-only mounts.  This will sync all the metadata and clean
	 * the log so that the recovery we just performed does not have to be
	 * replayed again on the next mount.
	 *
	 * We use the same quiesce mechanism as the rw->ro remount, as they are
	 * semantically identical operations.
	 */
	if (xfs_is_readonly(mp) && !xfs_has_norecovery(mp))
		xfs_log_clean(mp);

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if (quotamount) {
		ASSERT(mp->m_qflags == 0);
		mp->m_qflags = quotaflags;

		xfs_qm_mount_quotas(mp);
	}

	/*
	 * Now we are mounted, reserve a small amount of unused space for
	 * privileged transactions. This is needed so that transaction
	 * space required for critical operations can dip into this pool
	 * when at ENOSPC. This is needed for operations like create with
	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
	 * are not allowed to use this reserved space.
	 *
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount. Warn if this occurs.
	 */
	if (!xfs_is_readonly(mp)) {
		resblks = xfs_default_resblks(mp);
		error = xfs_reserve_blocks(mp, &resblks, NULL);
		if (error)
			xfs_warn(mp,
	"Unable to allocate reserve blocks. Continuing without reserve pool.");

		/* Reserve AG blocks for future btree expansion. */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error && error != -ENOSPC)
			goto out_agresv;
	}

	return 0;

 out_agresv:
	xfs_fs_unreserve_ag_blocks(mp);
	xfs_qm_unmount_quotas(mp);
 out_rtunmount:
	xfs_rtunmount_inodes(mp);
 out_rele_rip:
	xfs_irele(rip);
	/* Clean out dquots that might be in memory after quotacheck. */
	xfs_qm_unmount(mp);

	/*
	 * Inactivate all inodes that might still be in memory after a log
	 * intent recovery failure so that reclaim can free them.  Metadata
	 * inodes and the root directory shouldn't need inactivation, but the
	 * mount failed for some reason, so pull down all the state and flee.
	 */
	xfs_inodegc_flush(mp);

	/*
	 * Flush all inode reclamation work and flush the log.
	 * We have to do this /after/ rtunmount and qm_unmount because those
	 * two will have scheduled delayed reclaim for the rt/quota inodes.
	 *
	 * This is slightly different from the unmountfs call sequence
	 * because we could be tearing down a partially set up mount.  In
	 * particular, if log_mount_finish fails we bail out without calling
	 * qm_unmount_quotas and therefore rely on qm_unmount to release the
	 * quota inodes.
	 */
	xfs_unmount_flush_inodes(mp);
 out_log_dealloc:
	xfs_log_mount_cancel(mp);
 out_inodegc_shrinker:
	unregister_shrinker(&mp->m_inodegc_shrinker);
 out_fail_wait:
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_buftarg_drain(mp->m_logdev_targp);
	xfs_buftarg_drain(mp->m_ddev_targp);
 out_free_perag:
	xfs_free_perag(mp);
 out_free_dir:
	xfs_da_unmount(mp);
 out_remove_uuid:
	xfs_uuid_unmount(mp);
 out_remove_errortag:
	xfs_errortag_del(mp);
 out_remove_error_sysfs:
	xfs_error_sysfs_del(mp);
 out_del_stats:
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
 out_remove_sysfs:
	xfs_sysfs_del(&mp->m_kobj);
 out:
	return error;
}

/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
	struct xfs_mount	*mp)
{
	uint64_t		resblks;
	int			error;

	/*
	 * Perform all on-disk metadata updates required to inactivate inodes
	 * that the VFS evicted earlier in the unmount process.  Freeing inodes
	 * and discarding CoW fork preallocations can cause shape changes to
	 * the free inode and refcount btrees, respectively, so we must finish
	 * this before we discard the metadata space reservations.  Metadata
	 * inodes and the root directory do not require inactivation.
	 */
	xfs_inodegc_flush(mp);

	xfs_blockgc_stop(mp);
	xfs_fs_unreserve_ag_blocks(mp);
	xfs_qm_unmount_quotas(mp);
	xfs_rtunmount_inodes(mp);
	xfs_irele(mp->m_rootip);

	xfs_unmount_flush_inodes(mp);

	xfs_qm_unmount(mp);

	/*
	 * Unreserve any blocks we have so that when we unmount we don't account
	 * the reserved free space as used. This is really only necessary for
	 * lazy superblock counting because it trusts the incore superblock
	 * counters to be absolutely correct on clean unmount.
	 *
	 * We don't bother correcting this elsewhere for lazy superblock
	 * counting because on mount of an unclean filesystem we reconstruct the
	 * correct counter value and this is irrelevant.
	 *
	 * For non-lazy counter filesystems, this doesn't matter at all because
	 * we only ever apply deltas to the superblock and hence the incore
	 * value does not matter....
	 */
	resblks = 0;
	error = xfs_reserve_blocks(mp, &resblks, NULL);
	if (error)
		xfs_warn(mp, "Unable to free reserved block pool. "
				"Freespace may not be correct on next mount.");

	xfs_log_unmount(mp);
	xfs_da_unmount(mp);
	xfs_uuid_unmount(mp);

#if defined(DEBUG)
	xfs_errortag_clearall(mp);
#endif
	unregister_shrinker(&mp->m_inodegc_shrinker);
	xfs_free_perag(mp);

	xfs_errortag_del(mp);
	xfs_error_sysfs_del(mp);
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
	xfs_sysfs_del(&mp->m_kobj);
}

/*
 * Determine whether modifications can proceed. The caller specifies the minimum
 * freeze level for which modifications should not be allowed. This allows
 * certain operations to proceed while the freeze sequence is in progress, if
 * necessary.
 */
bool
xfs_fs_writable(
	struct xfs_mount	*mp,
	int			level)
{
	ASSERT(level > SB_UNFROZEN);
	if ((mp->m_super->s_writers.frozen >= level) ||
	    xfs_is_shutdown(mp) || xfs_is_readonly(mp))
		return false;

	return true;
}

/* Adjust m_fdblocks or m_frextents. */
int
xfs_mod_freecounter(
	struct xfs_mount	*mp,
	struct percpu_counter	*counter,
	int64_t			delta,
	bool			rsvd)
{
	int64_t			lcounter;
	long long		res_used;
	uint64_t		set_aside = 0;
	s32			batch;
	bool			has_resv_pool;

	ASSERT(counter == &mp->m_fdblocks || counter == &mp->m_frextents);
	has_resv_pool = (counter == &mp->m_fdblocks);
	if (rsvd)
		ASSERT(has_resv_pool);

	if (delta > 0) {
		/*
		 * If the reserve pool is depleted, put blocks back into it
		 * first. Most of the time the pool is full.
		 */
		if (likely(!has_resv_pool ||
			   mp->m_resblks == mp->m_resblks_avail)) {
			percpu_counter_add(counter, delta);
			return 0;
		}

		spin_lock(&mp->m_sb_lock);
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

		if (res_used > delta) {
			mp->m_resblks_avail += delta;
		} else {
			delta -= res_used;
			mp->m_resblks_avail = mp->m_resblks;
			percpu_counter_add(counter, delta);
		}
		spin_unlock(&mp->m_sb_lock);
		return 0;
	}

	/*
	 * Taking blocks away, need to be more accurate the closer we
	 * are to zero.
	 *
	 * If the counter has a value of less than 2 * max batch size,
	 * then make everything serialise as we are real close to
	 * ENOSPC.
	 */
	if (__percpu_counter_compare(counter, 2 * XFS_FDBLOCKS_BATCH,
				     XFS_FDBLOCKS_BATCH) < 0)
		batch = 1;
	else
		batch = XFS_FDBLOCKS_BATCH;

	/*
	 * Set aside allocbt blocks because these blocks are tracked as free
	 * space but not available for allocation. Technically this means that a
	 * single reservation cannot consume all remaining free space, but the
	 * ratio of allocbt blocks to usable free blocks should be rather small.
	 * The tradeoff without this is that filesystems that maintain high
	 * perag block reservations can over reserve physical block availability
	 * and fail physical allocation, which leads to much more serious
	 * problems (i.e. transaction abort, pagecache discards, etc.) than
	 * slightly premature -ENOSPC.
	 */
	if (has_resv_pool)
		set_aside = xfs_fdblocks_unavailable(mp);
	percpu_counter_add_batch(counter, delta, batch);
	if (__percpu_counter_compare(counter, set_aside,
				     XFS_FDBLOCKS_BATCH) >= 0) {
		/* we had space! */
		return 0;
	}

	/*
	 * lock up the sb for dipping into reserves before releasing the space
	 * that took us to ENOSPC.
	 */
	spin_lock(&mp->m_sb_lock);
	percpu_counter_add(counter, -delta);
	if (!has_resv_pool || !rsvd)
		goto fdblocks_enospc;

	lcounter = (long long)mp->m_resblks_avail + delta;
	if (lcounter >= 0) {
		mp->m_resblks_avail = lcounter;
		spin_unlock(&mp->m_sb_lock);
		return 0;
	}
	xfs_warn_once(mp,
"Reserve blocks depleted! Consider increasing reserve pool size.");

fdblocks_enospc:
	spin_unlock(&mp->m_sb_lock);
	return -ENOSPC;
}

/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp = mp->m_sb_bp;

	xfs_buf_lock(bp);
	mp->m_sb_bp = NULL;
	xfs_buf_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	struct xfs_mount	*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		xfs_notice(mp, "%s required on read-only device.", message);
		xfs_notice(mp, "write access unavailable, cannot proceed.");
		return -EROFS;
	}
	return 0;
}

/* Force the summary counters to be recalculated at next mount. */
void
xfs_force_summary_recalc(
	struct xfs_mount	*mp)
{
	if (!xfs_has_lazysbcount(mp))
		return;

	xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
}

/*
 * Enable a log incompat feature flag in the primary superblock.  The caller
 * cannot have any other transactions in progress.
 */
int
xfs_add_incompat_log_feature(
	struct xfs_mount	*mp,
	uint32_t		feature)
{
	struct xfs_dsb		*dsb;
	int			error;

	ASSERT(hweight32(feature) == 1);
	ASSERT(!(feature & XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));

	/*
	 * Force the log to disk and kick the background AIL thread to reduce
	 * the chances that the bwrite will stall waiting for the AIL to unpin
	 * the primary superblock buffer.  This isn't a data integrity
	 * operation, so we don't need a synchronous push.
	 */
	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	xfs_ail_push_all(mp->m_ail);

	/*
	 * Lock the primary superblock buffer to serialize all callers that
	 * are trying to set feature bits.
	 */
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_hold(mp->m_sb_bp);

	if (xfs_is_shutdown(mp)) {
		error = -EIO;
		goto rele;
	}

	if (xfs_sb_has_incompat_log_feature(&mp->m_sb, feature))
		goto rele;

	/*
	 * Write the primary superblock to disk immediately, because we need
	 * the log_incompat bit to be set in the primary super now to protect
	 * the log items that we're going to commit later.
	 */
	dsb = mp->m_sb_bp->b_addr;
	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_features_log_incompat |= cpu_to_be32(feature);
	error = xfs_bwrite(mp->m_sb_bp);
	if (error)
		goto shutdown;

	/*
	 * Add the feature bits to the incore superblock before we unlock the
	 * buffer.
	 */
	xfs_sb_add_incompat_log_features(&mp->m_sb, feature);
	xfs_buf_relse(mp->m_sb_bp);

	/* Log the superblock to disk. */
	return xfs_sync_sb(mp, false);
shutdown:
	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
rele:
	xfs_buf_relse(mp->m_sb_bp);
	return error;
}

/*
 * Clear all the log incompat flags from the superblock.
 *
 * The caller cannot be in a transaction, must ensure that the log does not
 * contain any log items protected by any log incompat bit, and must ensure
 * that there are no other threads that depend on the state of the log incompat
 * feature flags in the primary super.
 *
 * Returns true if the superblock is dirty.
 */
bool
xfs_clear_incompat_log_features(
	struct xfs_mount	*mp)
{
	bool			ret = false;

	if (!xfs_has_crc(mp) ||
	    !xfs_sb_has_incompat_log_feature(&mp->m_sb,
				XFS_SB_FEAT_INCOMPAT_LOG_ALL) ||
	    xfs_is_shutdown(mp))
		return false;

	/*
	 * Update the incore superblock.  We synchronize on the primary super
	 * buffer lock to be consistent with the add function, though at least
	 * in theory this shouldn't be necessary.
	 */
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_hold(mp->m_sb_bp);

	if (xfs_sb_has_incompat_log_feature(&mp->m_sb,
				XFS_SB_FEAT_INCOMPAT_LOG_ALL)) {
		xfs_sb_remove_incompat_log_features(&mp->m_sb);
		ret = true;
	}

	xfs_buf_relse(mp->m_sb_bp);
	return ret;
}

/*
 * Update the in-core delayed block counter.
 *
 * We prefer to update the counter without having to take a spinlock for every
 * counter update (i.e. batching).  Each change to delayed allocation
 * reservations can easily exceed the default percpu counter batching, so we
 * use a larger batch factor here.
 *
 * Note that we don't currently have any callers requiring fast summation
 * (e.g. percpu_counter_read) so we can use a big batch value here.
 */
#define XFS_DELALLOC_BATCH	(4096)
void
xfs_mod_delalloc(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	percpu_counter_add_batch(&mp->m_delalloc_blks, delta,
			XFS_DELALLOC_BATCH);
}