// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iwalk.h"
#include "xfs_quota.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_ag.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(struct xfs_mount *mp);
STATIC int	xfs_qm_init_quotainfo(struct xfs_mount *mp);

STATIC void	xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);

/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = dqp->q_id + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
		/* we're done if id overflows back to zero */
		if (!next_index)
			break;
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}
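
/*
 * Illustrative sketch of the walk contract above; this callback is
 * hypothetical and not part of this file.  A callback runs with
 * qi_tree_lock held, may return -EAGAIN to request a retry pass, and any
 * other nonzero return becomes last_error unless -EFSCORRUPTED was already
 * latched:
 *
 *	static int xfs_qm_count_one(struct xfs_dquot *dqp, void *data)
 *	{
 *		(*(uint64_t *)data)++;
 *		return 0;
 *	}
 *
 *	uint64_t ndquots = 0;
 *	error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_count_one,
 *			&ndquots);
 */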

/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	int			error = -EAGAIN;

	xfs_dqlock(dqp);
	if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
		goto out_unlock;

	dqp->q_flags |= XFS_DQFLAG_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (!error) {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		} else if (error == -EAGAIN) {
			dqp->q_flags &= ~XFS_DQFLAG_FREEING;
			goto out_unlock;
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
		!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;

out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Purge the dquot cache.
 */
static void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp)
{
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
	xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that the root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			xfs_irele(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			xfs_irele(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			xfs_irele(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			doalloc,
	struct xfs_dquot	**IO_idqpp)
{
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If @doalloc is true, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	bool		doalloc)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
				doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
				doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
				doalloc, &ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, false);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
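
/*
 * Illustrative usage note (the caller shown here is hypothetical): code
 * paths that are about to charge blocks or inodes to a user typically
 * attach the dquots up front, before making a transactional quota
 * reservation, e.g.:
 *
 *	error = xfs_qm_dqattach(ip);
 *	if (error)
 *		return error;
 *	... then xfs_trans_alloc() and a quota reservation follow ...
 */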

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}

struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_lru_isolate(lru, &dqp->q_lru);
		XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error)
			goto out_unlock_dirty;

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->q_flags |= XFS_DQFLAG_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}
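
/*
 * Descriptive note on the isolate return values above, added for clarity:
 * LRU_REMOVED means the dquot left the LRU, either because it regained a
 * reference or because it was moved to the dispose list for freeing;
 * LRU_SKIP leaves a busy dquot in place for a later shrinker pass; and
 * LRU_RETRY is returned after the LRU lock was dropped for a flush, so the
 * list walk revalidates its position before continuing.
 */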

static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) !=
			(__GFP_FS|__GFP_DIRECT_RECLAIM))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_shrink_walk(&qi->qi_lru, sc,
				     xfs_qm_dquot_isolate, &isol);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}

static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_shrink_count(&qi->qi_lru, sc);
}

STATIC void
xfs_qm_set_defquota(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	struct xfs_quotainfo	*qinf)
{
	struct xfs_dquot	*dqp;
	struct xfs_def_quota	*defq;
	int			error;

	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));

	/*
	 * Timers and warnings have been already set, let's just set the
	 * default limits for this quota type.
	 */
	defq->blk.hard = dqp->q_blk.hardlimit;
	defq->blk.soft = dqp->q_blk.softlimit;
	defq->ino.hard = dqp->q_ino.hardlimit;
	defq->ino.soft = dqp->q_ino.softlimit;
	defq->rtb.hard = dqp->q_rtb.hardlimit;
	defq->rtb.soft = dqp->q_rtb.softlimit;
	xfs_qm_dqdestroy(dqp);
}

/* Initialize quota time limits from the root dquot. */
static void
xfs_qm_init_timelimits(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	struct xfs_quotainfo	*qinf = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot	*dqp;
	int			error;

	defq = xfs_get_defquota(qinf, type);

	defq->blk.time = XFS_QM_BTIMELIMIT;
	defq->ino.time = XFS_QM_ITIMELIMIT;
	defq->rtb.time = XFS_QM_RTBTIMELIMIT;

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
	if (error)
		return;

	/*
	 * The warnings and timers set the grace period given to a user or
	 * group before he or she can no longer write. If it is zero, a
	 * default is used.
	 */
	if (dqp->q_blk.timer)
		defq->blk.time = dqp->q_blk.timer;
	if (dqp->q_ino.timer)
		defq->ino.time = dqp->q_ino.timer;
	if (dqp->q_rtb.timer)
		defq->rtb.time = dqp->q_rtb.timer;

	xfs_qm_dqdestroy(dqp);
}
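
/*
 * Worked example for the defaults above (descriptive only, the values are
 * made up): if the administrator stored a 14-day block grace period in the
 * id-0 dquot of this type, q_blk.timer is nonzero and becomes
 * defq->blk.time for every dquot of that type that carries no timer of its
 * own; the XFS_QM_*TIMELIMIT constants only apply when id 0 stores nothing.
 */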

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qinf;
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
	if (xfs_has_bigtime(mp)) {
		qinf->qi_expiry_min =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
		qinf->qi_expiry_max =
			xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
	} else {
		qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
		qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
	}
	trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
			qinf->qi_expiry_max);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
	xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);

	if (XFS_IS_UQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
	if (XFS_IS_GQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
	if (XFS_IS_PQUOTA_ON(mp))
		xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;

	error = register_shrinker(&qinf->qi_shrinker, "xfs-qm:%s",
				  mp->m_super->s_id);
	if (error)
		goto out_free_inos;

	return 0;

out_free_inos:
	mutex_destroy(&qinf->qi_quotaofflock);
	mutex_destroy(&qinf->qi_tree_lock);
	xfs_qm_destroy_quotainos(qinf);
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);
	xfs_qm_destroy_quotainos(qi);
	mutex_destroy(&qi->qi_tree_lock);
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	struct xfs_mount	*mp,
	struct xfs_inode	**ipp,
	unsigned int		flags)
{
	struct xfs_trans	*tp;
	int			error;
	bool			need_alloc = true;

	*ipp = NULL;
	/*
	 * With a superblock that doesn't have a separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_has_pquotino(mp) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_pquotino != NULLFSINO))
				return -EFSCORRUPTED;
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			if (XFS_IS_CORRUPT(mp,
					   mp->m_sb.sb_gquotino != NULLFSINO))
				return -EFSCORRUPTED;
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
			need_alloc = false;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
			need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
			0, 0, &tp);
	if (error)
		return error;

	if (need_alloc) {
		xfs_ino_t	ino;

		error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
		if (!error)
			error = xfs_init_new_inode(&init_user_ns, tp, NULL, ino,
					S_IFREG, 1, 0, 0, false, ipp);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_has_quota(mp));

		xfs_add_quota(mp);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ipp)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ipp)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ipp)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);

	error = xfs_trans_commit(tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
	}
	if (need_alloc)
		xfs_finish_inode_setup(*ipp);
	return error;
}
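
/*
 * Concrete scenario for the inode-sharing logic above (descriptive
 * example): on a filesystem whose superblock predates a separate
 * sb_pquotino field, a previous mount with group quotas left sb_gquotino
 * set.  Remounting with project quotas instead reuses that inode: the
 * branch above moves sb_gquotino into 'ino', igets it, clears both
 * superblock fields, and skips the allocation path entirely.
 */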

STATIC void
xfs_qm_reset_dqcounts(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
		sizeof(struct xfs_dqblk);
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in
		 * xfs_dquot_verify.
		 */
		if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
		    (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
			xfs_dqblk_repair(mp, &dqb[j], id + j, type);

		/*
		 * Reset type in case we are reusing group quota file for
		 * project quotas or vice versa
		 */
		ddq->d_type = type;
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;

		/*
		 * dquot id 0 stores the default grace period and the maximum
		 * warning limit that were set by the administrator, so we
		 * should not reset them.
		 */
		if (ddq->d_id != 0) {
			ddq->d_btimer = 0;
			ddq->d_itimer = 0;
			ddq->d_rtbtimer = 0;
			ddq->d_bwarns = 0;
			ddq->d_iwarns = 0;
			ddq->d_rtbwarns = 0;
			if (xfs_has_bigtime(mp))
				ddq->d_type |= XFS_DQTYPE_BIGTIME;
		}

		if (xfs_has_crc(mp)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}
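
/*
 * Note on the checksum update above (added for clarity): the dquot block is
 * rewritten in place, so on CRC-enabled (V5) filesystems the per-dqblk CRC
 * must be recomputed after the counters are zeroed; otherwise the verifier
 * attached before writeback would reject the very buffer we just repaired.
 */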

STATIC int
xfs_qm_reset_dqcounts_all(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error = 0;

	ASSERT(blkcnt > 0);

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an -EFSCORRUPTED here.
		 * If this occurs, re-read without CRC validation so that we can
		 * repair the damage via xfs_qm_reset_dqcounts(). This process
		 * will leave a trace in the log indicating corruption has
		 * been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
 * counters for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_reset_dqcounts_buf(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	xfs_dqtype_t		type,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
						XFS_FSB_TO_DADDR(mp, rablkno),
						mp->m_quotainfo->qi_dqchunklen,
						&xfs_dquot_buf_ops);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_reset_dqcounts_all(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   type, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	id = xfs_qm_id_for_quotatype(ip, type);
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	dqp->q_ino.count++;
	dqp->q_ino.reserved++;
	if (nblks) {
		dqp->q_blk.count += nblks;
		dqp->q_blk.reserved += nblks;
	}
	if (rtblks) {
		dqp->q_rtb.count += rtblks;
		dqp->q_rtb.reserved += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_id) {
		xfs_qm_adjust_dqlimits(dqp);
		xfs_qm_adjust_dqtimers(dqp);
	}

	dqp->q_flags |= XFS_DQFLAG_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
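
/*
 * Worked example for the adjustment above (the numbers are made up): an
 * inode with i_nblocks == 100, of which rtblks == 40 live on the realtime
 * device, contributes nblks == 60 to q_blk, 40 to q_rtb, and 1 to q_ino of
 * each dquot it maps to.  'reserved' is bumped in step with 'count' because
 * quotacheck runs at mount time with no delayed allocations outstanding.
 */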

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	struct xfs_inode	*ip;
	xfs_qcnt_t		nblks;
	xfs_filblks_t		rtblks = 0;	/* total rt blks */
	int			error;

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino))
		return 0;

	/*
	 * We don't _need_ to take the ilock EXCL here because quotacheck runs
	 * at mount time and therefore nobody will be racing chown/chproj.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
	if (error == -EINVAL || error == -ENOENT)
		return 0;
	if (error)
		return error;

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);

		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
		if (error)
			goto error0;

		xfs_bmap_count_leaves(ifp, &rtblks);
	}

	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
				rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
				rtblks);
		if (error)
			goto error0;
	}

error0:
	xfs_irele(ip);
	return error;
}

STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	/*
	 * The only way the dquot is already flush locked by the time quotacheck
	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
	 * it for the final time. Quotacheck collects all dquot bufs in the
	 * local delwri queue before dquots are dirtied, so reclaim can't have
	 * possibly queued it for I/O. The only way out is to push the buffer to
	 * cycle the flush lock.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/* buf is pinned in-core by delwri list */
		error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
				mp->m_quotainfo->qi_dqchunklen, 0, &bp);
		if (error)
			goto out_unlock;

		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			error = -EAGAIN;
			xfs_buf_relse(bp);
			goto out_unlock;
		}
		xfs_buf_unlock(bp);

		xfs_buf_delwri_pushbuf(bp, buffer_list);
		xfs_buf_rele(bp);

		error = -EAGAIN;
		goto out_unlock;
	}

	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
STATIC int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			error, error2;
	uint			flags;
	LIST_HEAD	(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_ON(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
			NULL);
	if (error) {
		/*
		 * The inode walk may have partially populated the dquot
		 * caches.  We must purge them before disabling quota and
		 * tearing down the quotainfo, or else the dquots will leak.
		 */
		xfs_qm_dqpurge_all(mp);
		goto error_return;
	}

	/*
	 * We've made all the changes that we need to make incore.  Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

error_return:
	xfs_buf_delwri_cancel(&buffer_list);

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return error;
}
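
/*
 * Quotacheck flow in one paragraph (summary note): reset every on-disk
 * dquot counter to zero, walk every inode tallying its block and inode
 * usage into the incore dquots, flush the dirtied dquots through a local
 * delwri queue, and only then set the *_CHKD flags; any failure along the
 * way purges the dquot cache and disables quota accounting for this mount.
 */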

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;

	/*
	 * If quotas on realtime volumes are not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_ON(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_sync_sb(mp, false)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_ON(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_has_quota(mp)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		xfs_irele(uip);
	if (gip)
		xfs_irele(gip);
	if (pip)
		xfs_irele(pip);
	return error;
}

STATIC void
xfs_qm_destroy_quotainos(
	struct xfs_quotainfo	*qi)
{
	if (qi->qi_uquotaip) {
		xfs_irele(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		xfs_irele(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		xfs_irele(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
}

STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/* --------------- utility functions for vnodeops ---------------- */

/*
 * Given an inode, a uid, gid and prid, make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	kuid_t			uid,
	kgid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	struct user_namespace	*user_ns = inode->i_sb->s_user_ns;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = inode->i_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, true);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(O_udqpp);
		if (!uid_eq(inode->i_uid, uid)) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
					XFS_DQTYPE_USER, true, &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(O_gdqpp);
		if (!gid_eq(inode->i_gid, gid)) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
					XFS_DQTYPE_GROUP, true, &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(O_pdqpp);
		if (ip->i_projid != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, prid,
					XFS_DQTYPE_PROJ, true, &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	xfs_qm_dqrele(gq);
	xfs_qm_dqrele(uq);
	return error;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
struct xfs_dquot *
xfs_qm_vop_chown(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	**IO_olddq,
	struct xfs_dquot	*newdq)
{
	struct xfs_dquot	*prevdq;
	uint			bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Back when we made quota reservations for the chown, we reserved the
	 * ondisk blocks + delalloc blocks with the new dquot.  Now that we've
	 * switched the dquots, decrease the new dquot's block reservation
	 * (having already bumped up the real counter) so that we don't have
	 * any reservation to give back when we commit.
	 */
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
			-ip->i_delayed_blks);

	/*
	 * Give the incore reservation for delalloc blocks back to the old
	 * dquot.  We don't normally handle delalloc quota reservations
	 * transactionally, so just lock the dquot and subtract from the
	 * reservation.  Dirty the transaction because it's too late to turn
	 * back now.
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	xfs_dqlock(prevdq);
	ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
	prevdq->q_blk.reserved -= ip->i_delayed_blks;
	xfs_dqunlock(prevdq);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}
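
/*
 * Rough worked example for the reservation juggling above (the numbers are
 * made up): an inode with 80 on-disk blocks and 20 delalloc blocks had 100
 * blocks reserved against newdq when the chown was set up.  The counter
 * mods move the 80 on-disk blocks from prevdq to newdq; the RES_BLKS mod
 * trims 20 from newdq's transaction reservation so those blocks stay
 * reserved for the still-delalloc data instead of being returned at commit;
 * and prevdq hands back its own 20-block incore delalloc reservation
 * directly.
 */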

int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(ip->i_projid == pdqp->q_id);

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}

/* Decide if this inode's dquot is near an enforcement boundary. */
bool
xfs_inode_near_dquot_enforcement(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type)
{
	struct xfs_dquot	*dqp;
	int64_t			freesp;

	/* We only care for quotas that are enabled and enforced. */
	dqp = xfs_inode_dquot(ip, type);
	if (!dqp || !xfs_dquot_is_enforced(dqp))
		return false;

	if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
	    xfs_dquot_res_over_limits(&dqp->q_rtb))
		return true;

	/* For space on the data device, check the various thresholds. */
	if (!dqp->q_prealloc_hi_wmark)
		return false;
	if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
		return false;
	if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
		return true;
	freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
	if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
		return true;
	return false;
}
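
/*
 * Worked example for the thresholds above (made-up numbers): with a high
 * watermark of 1000 blocks, a low watermark of 900, and a 5% low-space band
 * of 50 blocks, a dquot holding 960 reserved blocks has freesp == 40, which
 * is inside the band, so the function returns true and callers can start
 * throttling speculative preallocation for this owner.
 */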