xfs_trans_dquot.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2000-2002 Silicon Graphics, Inc.
  4. * All Rights Reserved.
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_shared.h"
  9. #include "xfs_format.h"
  10. #include "xfs_log_format.h"
  11. #include "xfs_trans_resv.h"
  12. #include "xfs_mount.h"
  13. #include "xfs_inode.h"
  14. #include "xfs_trans.h"
  15. #include "xfs_trans_priv.h"
  16. #include "xfs_quota.h"
  17. #include "xfs_qm.h"
  18. #include "xfs_trace.h"
  19. #include "xfs_error.h"
  20. STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *);
  21. /*
  22. * Add the locked dquot to the transaction.
  23. * The dquot must be locked, and it cannot be associated with any
  24. * transaction.
  25. */
  26. void
  27. xfs_trans_dqjoin(
  28. struct xfs_trans *tp,
  29. struct xfs_dquot *dqp)
  30. {
  31. ASSERT(XFS_DQ_IS_LOCKED(dqp));
  32. ASSERT(dqp->q_logitem.qli_dquot == dqp);
  33. /*
  34. * Get a log_item_desc to point at the new item.
  35. */
  36. xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
  37. }
  38. /*
  39. * This is called to mark the dquot as needing
  40. * to be logged when the transaction is committed. The dquot must
  41. * already be associated with the given transaction.
  42. * Note that it marks the entire transaction as dirty. In the ordinary
  43. * case, this gets called via xfs_trans_commit, after the transaction
  44. * is already dirty. However, there's nothing stop this from getting
  45. * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
  46. * flag.
  47. */
  48. void
  49. xfs_trans_log_dquot(
  50. struct xfs_trans *tp,
  51. struct xfs_dquot *dqp)
  52. {
  53. ASSERT(XFS_DQ_IS_LOCKED(dqp));
  54. /* Upgrade the dquot to bigtime format if possible. */
  55. if (dqp->q_id != 0 &&
  56. xfs_has_bigtime(tp->t_mountp) &&
  57. !(dqp->q_type & XFS_DQTYPE_BIGTIME))
  58. dqp->q_type |= XFS_DQTYPE_BIGTIME;
  59. tp->t_flags |= XFS_TRANS_DIRTY;
  60. set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
  61. }
  62. /*
  63. * Carry forward whatever is left of the quota blk reservation to
  64. * the spanky new transaction
  65. */
  66. void
  67. xfs_trans_dup_dqinfo(
  68. struct xfs_trans *otp,
  69. struct xfs_trans *ntp)
  70. {
  71. struct xfs_dqtrx *oq, *nq;
  72. int i, j;
  73. struct xfs_dqtrx *oqa, *nqa;
  74. uint64_t blk_res_used;
  75. if (!otp->t_dqinfo)
  76. return;
  77. xfs_trans_alloc_dqinfo(ntp);
  78. for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
  79. oqa = otp->t_dqinfo->dqs[j];
  80. nqa = ntp->t_dqinfo->dqs[j];
  81. for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
  82. blk_res_used = 0;
  83. if (oqa[i].qt_dquot == NULL)
  84. break;
  85. oq = &oqa[i];
  86. nq = &nqa[i];
  87. if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
  88. blk_res_used = oq->qt_bcount_delta;
  89. nq->qt_dquot = oq->qt_dquot;
  90. nq->qt_bcount_delta = nq->qt_icount_delta = 0;
  91. nq->qt_rtbcount_delta = 0;
  92. /*
  93. * Transfer whatever is left of the reservations.
  94. */
  95. nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
  96. oq->qt_blk_res = blk_res_used;
  97. nq->qt_rtblk_res = oq->qt_rtblk_res -
  98. oq->qt_rtblk_res_used;
  99. oq->qt_rtblk_res = oq->qt_rtblk_res_used;
  100. nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
  101. oq->qt_ino_res = oq->qt_ino_res_used;
  102. }
  103. }
  104. }
  105. /*
  106. * Wrap around mod_dquot to account for both user and group quotas.
  107. */
  108. void
  109. xfs_trans_mod_dquot_byino(
  110. xfs_trans_t *tp,
  111. xfs_inode_t *ip,
  112. uint field,
  113. int64_t delta)
  114. {
  115. xfs_mount_t *mp = tp->t_mountp;
  116. if (!XFS_IS_QUOTA_ON(mp) ||
  117. xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
  118. return;
  119. if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
  120. (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
  121. if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
  122. (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
  123. if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
  124. (void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
  125. }
  126. STATIC struct xfs_dqtrx *
  127. xfs_trans_get_dqtrx(
  128. struct xfs_trans *tp,
  129. struct xfs_dquot *dqp)
  130. {
  131. int i;
  132. struct xfs_dqtrx *qa;
  133. switch (xfs_dquot_type(dqp)) {
  134. case XFS_DQTYPE_USER:
  135. qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
  136. break;
  137. case XFS_DQTYPE_GROUP:
  138. qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
  139. break;
  140. case XFS_DQTYPE_PROJ:
  141. qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
  142. break;
  143. default:
  144. return NULL;
  145. }
  146. for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
  147. if (qa[i].qt_dquot == NULL ||
  148. qa[i].qt_dquot == dqp)
  149. return &qa[i];
  150. }
  151. return NULL;
  152. }
  153. /*
  154. * Make the changes in the transaction structure.
  155. * The moral equivalent to xfs_trans_mod_sb().
  156. * We don't touch any fields in the dquot, so we don't care
  157. * if it's locked or not (most of the time it won't be).
  158. */
  159. void
  160. xfs_trans_mod_dquot(
  161. struct xfs_trans *tp,
  162. struct xfs_dquot *dqp,
  163. uint field,
  164. int64_t delta)
  165. {
  166. struct xfs_dqtrx *qtrx;
  167. ASSERT(tp);
  168. ASSERT(XFS_IS_QUOTA_ON(tp->t_mountp));
  169. qtrx = NULL;
  170. if (!delta)
  171. return;
  172. if (tp->t_dqinfo == NULL)
  173. xfs_trans_alloc_dqinfo(tp);
  174. /*
  175. * Find either the first free slot or the slot that belongs
  176. * to this dquot.
  177. */
  178. qtrx = xfs_trans_get_dqtrx(tp, dqp);
  179. ASSERT(qtrx);
  180. if (qtrx->qt_dquot == NULL)
  181. qtrx->qt_dquot = dqp;
  182. trace_xfs_trans_mod_dquot_before(qtrx);
  183. trace_xfs_trans_mod_dquot(tp, dqp, field, delta);
  184. switch (field) {
  185. /* regular disk blk reservation */
  186. case XFS_TRANS_DQ_RES_BLKS:
  187. qtrx->qt_blk_res += delta;
  188. break;
  189. /* inode reservation */
  190. case XFS_TRANS_DQ_RES_INOS:
  191. qtrx->qt_ino_res += delta;
  192. break;
  193. /* disk blocks used. */
  194. case XFS_TRANS_DQ_BCOUNT:
  195. qtrx->qt_bcount_delta += delta;
  196. break;
  197. case XFS_TRANS_DQ_DELBCOUNT:
  198. qtrx->qt_delbcnt_delta += delta;
  199. break;
  200. /* Inode Count */
  201. case XFS_TRANS_DQ_ICOUNT:
  202. if (qtrx->qt_ino_res && delta > 0) {
  203. qtrx->qt_ino_res_used += delta;
  204. ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
  205. }
  206. qtrx->qt_icount_delta += delta;
  207. break;
  208. /* rtblk reservation */
  209. case XFS_TRANS_DQ_RES_RTBLKS:
  210. qtrx->qt_rtblk_res += delta;
  211. break;
  212. /* rtblk count */
  213. case XFS_TRANS_DQ_RTBCOUNT:
  214. if (qtrx->qt_rtblk_res && delta > 0) {
  215. qtrx->qt_rtblk_res_used += delta;
  216. ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
  217. }
  218. qtrx->qt_rtbcount_delta += delta;
  219. break;
  220. case XFS_TRANS_DQ_DELRTBCOUNT:
  221. qtrx->qt_delrtb_delta += delta;
  222. break;
  223. default:
  224. ASSERT(0);
  225. }
  226. trace_xfs_trans_mod_dquot_after(qtrx);
  227. }
/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.
 *
 * XFS_QM_TRANS_MAXDQS is 2 (see the ASSERT below), so at most two dquots of
 * one type - usr, grp or prj - can appear per transaction and we don't need
 * to make this very generic: one dquot is locked directly, two go through
 * xfs_dqlock2() which takes them in a deadlock-safe order.
 */
STATIC void
xfs_trans_dqlockedjoin(
	struct xfs_trans	*tp,
	struct xfs_dqtrx	*q)
{
	/* Slot 0 must be occupied; the array fills sequentially. */
	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		/* Only one dquot was modified: lock and join just it. */
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else {
		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
		/* Lock both in ordered fashion, then join each to tp. */
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	}
}
/*
 * Apply dqtrx changes to the quota reservation counters.
 *
 * @res:         incore per-resource dquot counters to adjust
 * @reserved:    amount this transaction reserved for the resource
 * @res_used:    portion of that reservation actually consumed
 * @count_delta: raw count change accumulated by the transaction
 */
static inline void
xfs_apply_quota_reservation_deltas(
	struct xfs_dquot_res	*res,
	uint64_t		reserved,
	int64_t			res_used,
	int64_t			count_delta)
{
	if (reserved != 0) {
		/*
		 * Subtle math here: If reserved > res_used (the normal case),
		 * we're simply subtracting the unused transaction quota
		 * reservation from the dquot reservation.
		 *
		 * If, however, res_used > reserved, then we have allocated
		 * more quota blocks than were reserved for the transaction.
		 * We must add that excess to the dquot reservation since it
		 * tracks (usage + resv) and by definition we didn't reserve
		 * that excess.
		 *
		 * abs() handles both directions with one expression.
		 */
		res->reserved -= abs(reserved - res_used);
	} else if (count_delta != 0) {
		/*
		 * These blks were never reserved, either inside a transaction
		 * or outside one (in a delayed allocation). Also, this isn't
		 * always a negative number since we sometimes deliberately
		 * skip quota reservations.
		 */
		res->reserved += count_delta;
	}
}
  281. /*
  282. * Called by xfs_trans_commit() and similar in spirit to
  283. * xfs_trans_apply_sb_deltas().
  284. * Go thru all the dquots belonging to this transaction and modify the
  285. * INCORE dquot to reflect the actual usages.
  286. * Unreserve just the reservations done by this transaction.
  287. * dquot is still left locked at exit.
  288. */
  289. void
  290. xfs_trans_apply_dquot_deltas(
  291. struct xfs_trans *tp)
  292. {
  293. int i, j;
  294. struct xfs_dquot *dqp;
  295. struct xfs_dqtrx *qtrx, *qa;
  296. int64_t totalbdelta;
  297. int64_t totalrtbdelta;
  298. if (!tp->t_dqinfo)
  299. return;
  300. ASSERT(tp->t_dqinfo);
  301. for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
  302. qa = tp->t_dqinfo->dqs[j];
  303. if (qa[0].qt_dquot == NULL)
  304. continue;
  305. /*
  306. * Lock all of the dquots and join them to the transaction.
  307. */
  308. xfs_trans_dqlockedjoin(tp, qa);
  309. for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
  310. uint64_t blk_res_used;
  311. qtrx = &qa[i];
  312. /*
  313. * The array of dquots is filled
  314. * sequentially, not sparsely.
  315. */
  316. if ((dqp = qtrx->qt_dquot) == NULL)
  317. break;
  318. ASSERT(XFS_DQ_IS_LOCKED(dqp));
  319. /*
  320. * adjust the actual number of blocks used
  321. */
  322. /*
  323. * The issue here is - sometimes we don't make a blkquota
  324. * reservation intentionally to be fair to users
  325. * (when the amount is small). On the other hand,
  326. * delayed allocs do make reservations, but that's
  327. * outside of a transaction, so we have no
  328. * idea how much was really reserved.
  329. * So, here we've accumulated delayed allocation blks and
  330. * non-delay blks. The assumption is that the
  331. * delayed ones are always reserved (outside of a
  332. * transaction), and the others may or may not have
  333. * quota reservations.
  334. */
  335. totalbdelta = qtrx->qt_bcount_delta +
  336. qtrx->qt_delbcnt_delta;
  337. totalrtbdelta = qtrx->qt_rtbcount_delta +
  338. qtrx->qt_delrtb_delta;
  339. if (totalbdelta != 0 || totalrtbdelta != 0 ||
  340. qtrx->qt_icount_delta != 0) {
  341. trace_xfs_trans_apply_dquot_deltas_before(dqp);
  342. trace_xfs_trans_apply_dquot_deltas(qtrx);
  343. }
  344. #ifdef DEBUG
  345. if (totalbdelta < 0)
  346. ASSERT(dqp->q_blk.count >= -totalbdelta);
  347. if (totalrtbdelta < 0)
  348. ASSERT(dqp->q_rtb.count >= -totalrtbdelta);
  349. if (qtrx->qt_icount_delta < 0)
  350. ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
  351. #endif
  352. if (totalbdelta)
  353. dqp->q_blk.count += totalbdelta;
  354. if (qtrx->qt_icount_delta)
  355. dqp->q_ino.count += qtrx->qt_icount_delta;
  356. if (totalrtbdelta)
  357. dqp->q_rtb.count += totalrtbdelta;
  358. if (totalbdelta != 0 || totalrtbdelta != 0 ||
  359. qtrx->qt_icount_delta != 0)
  360. trace_xfs_trans_apply_dquot_deltas_after(dqp);
  361. /*
  362. * Get any default limits in use.
  363. * Start/reset the timer(s) if needed.
  364. */
  365. if (dqp->q_id) {
  366. xfs_qm_adjust_dqlimits(dqp);
  367. xfs_qm_adjust_dqtimers(dqp);
  368. }
  369. dqp->q_flags |= XFS_DQFLAG_DIRTY;
  370. /*
  371. * add this to the list of items to get logged
  372. */
  373. xfs_trans_log_dquot(tp, dqp);
  374. /*
  375. * Take off what's left of the original reservation.
  376. * In case of delayed allocations, there's no
  377. * reservation that a transaction structure knows of.
  378. */
  379. blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
  380. xfs_apply_quota_reservation_deltas(&dqp->q_blk,
  381. qtrx->qt_blk_res, blk_res_used,
  382. qtrx->qt_bcount_delta);
  383. /*
  384. * Adjust the RT reservation.
  385. */
  386. xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
  387. qtrx->qt_rtblk_res,
  388. qtrx->qt_rtblk_res_used,
  389. qtrx->qt_rtbcount_delta);
  390. /*
  391. * Adjust the inode reservation.
  392. */
  393. ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
  394. xfs_apply_quota_reservation_deltas(&dqp->q_ino,
  395. qtrx->qt_ino_res,
  396. qtrx->qt_ino_res_used,
  397. qtrx->qt_icount_delta);
  398. ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
  399. ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
  400. ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
  401. }
  402. }
  403. }
/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	bool			locked;

	if (!tp->t_dqinfo)
		return;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 *
			 * Lock lazily: take the dquot lock only once we know
			 * some reservation actually needs to be backed out,
			 * and take it at most once for all three resources.
			 */
			locked = false;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = true;
				dqp->q_blk.reserved -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_ino.reserved -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_rtb.reserved -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);
		}
	}
}
  464. STATIC void
  465. xfs_quota_warn(
  466. struct xfs_mount *mp,
  467. struct xfs_dquot *dqp,
  468. int type)
  469. {
  470. enum quota_type qtype;
  471. switch (xfs_dquot_type(dqp)) {
  472. case XFS_DQTYPE_PROJ:
  473. qtype = PRJQUOTA;
  474. break;
  475. case XFS_DQTYPE_USER:
  476. qtype = USRQUOTA;
  477. break;
  478. case XFS_DQTYPE_GROUP:
  479. qtype = GRPQUOTA;
  480. break;
  481. default:
  482. return;
  483. }
  484. quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
  485. mp->m_super->s_dev, type);
  486. }
/*
 * Decide if we can make an additional reservation against a quota resource.
 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
 *
 * Note that we assume that the numeric difference between the inode and block
 * warning codes will always be 3 since it's userspace ABI now, and will never
 * decrease the quota reservation, so the *BELOW messages are irrelevant.
 */
static inline int
xfs_dqresv_check(
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim,
	int64_t			delta,
	bool			*fatal)
{
	xfs_qcnt_t		hardlimit = res->hardlimit;
	xfs_qcnt_t		softlimit = res->softlimit;
	xfs_qcnt_t		total_count = res->reserved + delta;

	/* Compile-time check of the inode/block warning-code ABI offset. */
	BUILD_BUG_ON(QUOTA_NL_BHARDWARN     != QUOTA_NL_IHARDWARN + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTWARN     != QUOTA_NL_ISOFTWARN + 3);

	*fatal = false;
	/* Releasing resources can never exceed a limit. */
	if (delta <= 0)
		return QUOTA_NL_NOWARN;

	/* Fall back to the default limits if no per-dquot limit is set. */
	if (!hardlimit)
		hardlimit = qlim->hard;
	if (!softlimit)
		softlimit = qlim->soft;

	if (hardlimit && total_count > hardlimit) {
		*fatal = true;
		return QUOTA_NL_IHARDWARN;
	}

	if (softlimit && total_count > softlimit) {
		time64_t	now = ktime_get_real_seconds();

		/* Grace period expired: treat like a hard-limit failure. */
		if (res->timer != 0 && now > res->timer) {
			*fatal = true;
			return QUOTA_NL_ISOFTLONGWARN;
		}

		return QUOTA_NL_ISOFTWARN;
	}

	return QUOTA_NL_NOWARN;
}
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 *
 * Returns 0 on success, -EDQUOT/-ENOSPC when a limit would be exceeded,
 * or -EFSCORRUPTED (after shutting the filesystem down) if the incore
 * counters are inconsistent.  The dquot is unlocked on return.
 */
STATIC int
xfs_trans_dqresv(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot_res	*blkres;
	struct xfs_quota_limits	*qlim;

	xfs_dqlock(dqp);

	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));

	/* Select the data or realtime block resource per the caller's flags. */
	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		blkres = &dqp->q_blk;
		qlim = &defq->blk;
	} else {
		blkres = &dqp->q_rtb;
		qlim = &defq->rtb;
	}

	/* Skip enforcement for forced reservations and the id-0 dquot. */
	if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
	    xfs_dquot_is_enforced(dqp)) {
		int		quota_nl;
		bool		fatal;

		/*
		 * dquot is locked already. See if we'd go over the hardlimit
		 * or exceed the timelimit if we'd reserve resources.
		 */
		quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			/*
			 * Quota block warning codes are 3 more than the inode
			 * codes, which we check above.
			 */
			xfs_quota_warn(mp, dqp, quota_nl + 3);
			if (fatal)
				goto error_return;
		}

		quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
				&fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			xfs_quota_warn(mp, dqp, quota_nl);
			if (fatal)
				goto error_return;
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_blk.reserved = q_blk.count + resv
	 */
	blkres->reserved += (xfs_qcnt_t)nblks;
	dqp->q_ino.reserved += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		xfs_trans_mod_dquot(tp, dqp, flags & XFS_QMOPT_RESBLK_MASK,
				    nblks);
		xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_INOS, ninos);
	}

	/* Reserved counts must never drop below the actual usage. */
	if (XFS_IS_CORRUPT(mp, dqp->q_blk.reserved < dqp->q_blk.count) ||
	    XFS_IS_CORRUPT(mp, dqp->q_rtb.reserved < dqp->q_rtb.count) ||
	    XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count))
		goto error_corrupt;

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	/* Project quota overruns are reported as ENOSPC, not EDQUOT. */
	if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
		return -ENOSPC;
	return -EDQUOT;
error_corrupt:
	xfs_dqunlock(dqp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return -EFSCORRUPTED;
}
/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows an all-or-nothing
 * approach: if any reservation fails, the earlier ones are backed out.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 * (Project quota failures surface as -ENOSPC via xfs_trans_dqresv.)
 */
int
xfs_trans_reserve_quota_bydquots(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	int		error;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

	if (udqp) {
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
		if (error)
			return error;
	}

	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
		if (error)
			goto unwind_usr;
	}

	if (pdqp) {
		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
		if (error)
			goto unwind_grp;
	}

	/*
	 * Didn't change anything critical, so, no need to log
	 */
	return 0;

unwind_grp:
	/* Undo earlier reservations; FORCE_RES ensures the undo cannot fail. */
	flags |= XFS_QMOPT_FORCE_RES;
	if (gdqp)
		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
	flags |= XFS_QMOPT_FORCE_RES;
	if (udqp)
		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
	return error;
}
  674. /*
  675. * Lock the dquot and change the reservation if we can.
  676. * This doesn't change the actual usage, just the reservation.
  677. * The inode sent in is locked.
  678. */
  679. int
  680. xfs_trans_reserve_quota_nblks(
  681. struct xfs_trans *tp,
  682. struct xfs_inode *ip,
  683. int64_t dblocks,
  684. int64_t rblocks,
  685. bool force)
  686. {
  687. struct xfs_mount *mp = ip->i_mount;
  688. unsigned int qflags = 0;
  689. int error;
  690. if (!XFS_IS_QUOTA_ON(mp))
  691. return 0;
  692. ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
  693. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  694. if (force)
  695. qflags |= XFS_QMOPT_FORCE_RES;
  696. /* Reserve data device quota against the inode's dquots. */
  697. error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
  698. ip->i_gdquot, ip->i_pdquot, dblocks, 0,
  699. XFS_QMOPT_RES_REGBLKS | qflags);
  700. if (error)
  701. return error;
  702. /* Do the same but for realtime blocks. */
  703. error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
  704. ip->i_gdquot, ip->i_pdquot, rblocks, 0,
  705. XFS_QMOPT_RES_RTBLKS | qflags);
  706. if (error) {
  707. xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
  708. ip->i_gdquot, ip->i_pdquot, -dblocks, 0,
  709. XFS_QMOPT_RES_REGBLKS);
  710. return error;
  711. }
  712. return 0;
  713. }
  714. /* Change the quota reservations for an inode creation activity. */
  715. int
  716. xfs_trans_reserve_quota_icreate(
  717. struct xfs_trans *tp,
  718. struct xfs_dquot *udqp,
  719. struct xfs_dquot *gdqp,
  720. struct xfs_dquot *pdqp,
  721. int64_t dblocks)
  722. {
  723. struct xfs_mount *mp = tp->t_mountp;
  724. if (!XFS_IS_QUOTA_ON(mp))
  725. return 0;
  726. return xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
  727. dblocks, 1, XFS_QMOPT_RES_REGBLKS);
  728. }
  729. STATIC void
  730. xfs_trans_alloc_dqinfo(
  731. xfs_trans_t *tp)
  732. {
  733. tp->t_dqinfo = kmem_cache_zalloc(xfs_dqtrx_cache,
  734. GFP_KERNEL | __GFP_NOFAIL);
  735. }
  736. void
  737. xfs_trans_free_dqinfo(
  738. xfs_trans_t *tp)
  739. {
  740. if (!tp->t_dqinfo)
  741. return;
  742. kmem_cache_free(xfs_dqtrx_cache, tp->t_dqinfo);
  743. tp->t_dqinfo = NULL;
  744. }