quota.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <[email protected]>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

/* Convert a scrub type code to a DQ flag, or return 0 if error. */
static inline xfs_dqtype_t
xchk_quota_to_dqtype(
	struct xfs_scrub	*sc)
{
	switch (sc->sm->sm_type) {
	case XFS_SCRUB_TYPE_UQUOTA:
		return XFS_DQTYPE_USER;
	case XFS_SCRUB_TYPE_GQUOTA:
		return XFS_DQTYPE_GROUP;
	case XFS_SCRUB_TYPE_PQUOTA:
		return XFS_DQTYPE_PROJ;
	default:
		return 0;
	}
}

/* Set us up to scrub a quota. */
int
xchk_setup_quota(
	struct xfs_scrub	*sc)
{
	xfs_dqtype_t		dqtype;
	int			error;

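	/* There's nothing to check if quota accounting isn't enabled. */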
	if (!XFS_IS_QUOTA_ON(sc->mp))
		return -ENOENT;

	dqtype = xchk_quota_to_dqtype(sc);
	if (dqtype == 0)
		return -EINVAL;

	if (!xfs_this_quota_on(sc->mp, dqtype))
		return -ENOENT;

	error = xchk_setup_fs(sc);
	if (error)
		return error;

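	/* Point the scrub context at this type's quota inode and lock it. */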
	sc->ip = xfs_quota_inode(sc->mp, dqtype);
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
	sc->ilock_flags = XFS_ILOCK_EXCL;
	return 0;
}

/* Quotas. */

struct xchk_quota_info {
	struct xfs_scrub	*sc;
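	/* The last dquot id we checked; ids must appear in increasing order. */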
	xfs_dqid_t		last_id;
};

/* Scrub the fields in an individual quota item. */
STATIC int
xchk_quota_item(
	struct xfs_dquot	*dq,
	xfs_dqtype_t		dqtype,
	void			*priv)
{
	struct xchk_quota_info	*sqi = priv;
	struct xfs_scrub	*sc = sqi->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_fileoff_t		offset;
	xfs_ino_t		fs_icount;
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return -ECANCELED;

	/*
	 * Except for the root dquot, the dquot we're given must have a
	 * higher id than the one we saw before.
	 */
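	/* Map the dquot id to its block offset within the quota file. */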
	offset = dq->q_id / qi->qi_dqperchunk;
	if (dq->q_id && dq->q_id <= sqi->last_id)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	sqi->last_id = dq->q_id;

	/*
	 * Warn if the hard limits are larger than the fs.
	 * Administrators can do this, though in production this seems
	 * suspect, which is why we flag it for review.
	 *
	 * Complain about corruption if the soft limit is greater than
	 * the hard limit.
	 */
	if (dq->q_blk.hardlimit > mp->m_sb.sb_dblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_blk.softlimit > dq->q_blk.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit > M_IGEO(mp)->maxicount)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_ino.softlimit > dq->q_ino.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit > mp->m_sb.sb_rblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (dq->q_rtb.softlimit > dq->q_rtb.hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Check the resource counts. */
	fs_icount = percpu_counter_sum(&mp->m_icount);

	/*
	 * Check that usage doesn't exceed physical limits. However, on
	 * a reflink filesystem we're allowed to exceed physical space
	 * if there are no quota limits.
	 */
	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_warning(sc, XFS_DATA_FORK,
					offset);
	} else {
		if (mp->m_sb.sb_dblocks < dq->q_blk.count)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					offset);
	}
	if (dq->q_ino.count > fs_icount || dq->q_rtb.count > mp->m_sb.sb_rblocks)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/*
	 * We can violate the hard limits if the admin suddenly sets a
	 * lower limit than the actual usage. However, we flag it for
	 * admin review.
	 */
	if (dq->q_id == 0)
		goto out;

	if (dq->q_blk.hardlimit != 0 &&
	    dq->q_blk.count > dq->q_blk.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_ino.hardlimit != 0 &&
	    dq->q_ino.count > dq->q_ino.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (dq->q_rtb.hardlimit != 0 &&
	    dq->q_rtb.count > dq->q_rtb.hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

out:
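	/* If we found corruption, stop iterating over any further dquots. */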
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}

/* Check the quota's data fork. */
STATIC int
xchk_quota_data_fork(
	struct xfs_scrub	*sc)
{
	struct xfs_bmbt_irec	irec = { 0 };
	struct xfs_iext_cursor	icur;
	struct xfs_quotainfo	*qi = sc->mp->m_quotainfo;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		max_dqid_off;
	int			error = 0;

	/* Invoke the fork scrubber. */
	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Check for data fork problems that apply only to quota files. */
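	/* Block offset of the highest possible dquot id. */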
	max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
	ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error))
			break;
		/*
		 * delalloc extents or blocks mapped above the highest
		 * quota id shouldn't happen.
		 */
		if (isnullstartblock(irec.br_startblock) ||
		    irec.br_startoff > max_dqid_off ||
		    irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					irec.br_startoff);
			break;
		}
	}

	return error;
}

/* Scrub all of a quota type's items. */
int
xchk_quota(
	struct xfs_scrub	*sc)
{
	struct xchk_quota_info	sqi;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_dqtype_t		dqtype;
	int			error = 0;

	dqtype = xchk_quota_to_dqtype(sc);

	/* Look for problem extents. */
	error = xchk_quota_data_fork(sc);
	if (error)
		goto out;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/*
	 * Check all the quota items. Now that we've checked the quota inode
	 * data fork, we have to drop ILOCK_EXCL to use the regular dquot
	 * functions.
	 */
	xfs_iunlock(sc->ip, sc->ilock_flags);
	sc->ilock_flags = 0;
	sqi.sc = sc;
	sqi.last_id = 0;
	error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi);
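	/* Re-take ILOCK_EXCL on the quota inode; the rest of scrub expects it held. */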
	sc->ilock_flags = XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);
	if (error == -ECANCELED)
		error = 0;
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
			sqi.last_id * qi->qi_dqperchunk, &error))
		goto out;

out:
	return error;
}