quota_global.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of operations over global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/llist.h>
#include <linux/iversion.h>

#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "quota.h"
#include "ocfs2_trace.h"

/*
 * Locking of quotas with OCFS2 is rather complex. Here are rules that
 * should be obeyed by all the functions:
 * - any write of quota structure (either to local or global file) is protected
 *   by dqio_sem or dquot->dq_lock.
 * - any modification of global quota file holds inode cluster lock, i_rwsem,
 *   and ip_alloc_sem of the global quota file (achieved by
 *   ocfs2_lock_global_qf). It also has to hold qinfo_lock.
 * - an allocation of new blocks for local quota file is protected by
 *   its ip_alloc_sem
 *
 * A rough sketch of locking dependencies (lf = local file, gf = global file):
 * Normal filesystem operation:
 *   start_trans -> dqio_sem -> write to lf
 * Syncing of local and global file:
 *   ocfs2_lock_global_qf -> start_trans -> dqio_sem -> qinfo_lock ->
 *     write to gf
 *     -> write to lf
 * Acquire dquot for the first time:
 *   dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
 *           -> alloc space for gf
 *           -> start_trans -> qinfo_lock -> write to gf
 *           -> ip_alloc_sem of lf -> alloc space for lf
 *           -> write to lf
 * Release last reference to dquot:
 *   dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
 *           -> write to lf
 * Note that all the above operations also hold the inode cluster lock of lf.
 * Recovery:
 *   inode cluster lock of recovered lf
 *     -> read bitmaps -> ip_alloc_sem of lf
 *     -> ocfs2_lock_global_qf -> start_trans -> dqio_sem -> qinfo_lock ->
 *        write to gf
 */

static void qsync_work_fn(struct work_struct *work);
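
/*
 * Copy usage and limits from the on-disk global dquot block into the
 * in-memory dquot. Fields the administrator changed locally since the last
 * sync (marked by the DQ_LASTSET_B + QIF_* flags) are left untouched so the
 * local settings are not lost; the global use count is loaded as well.
 */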
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	/* Update from disk only entries not set by the admin */
	if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
		m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
		m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
	if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
		m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
		m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
	if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
		m->dqb_btime = le64_to_cpu(d->dqb_btime);
	if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
		m->dqb_itime = le64_to_cpu(d->dqb_itime);
	OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}
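
/*
 * Fill the on-disk global dquot block from the in-memory dquot; the reverse
 * of ocfs2_global_disk2memdqb(). Padding fields are cleared so the entry is
 * fully initialized on disk.
 */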
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
	d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
	d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
	d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
	d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
	d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
	d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
	d->dqb_btime = cpu_to_le64(m->dqb_btime);
	d->dqb_itime = cpu_to_le64(m->dqb_itime);
	d->dqb_pad1 = d->dqb_pad2 = 0;
}
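
/*
 * Callback for the quota tree code: return 1 if the on-disk entry is in use
 * and belongs to the same id as the given dquot, 0 otherwise.
 */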
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;

	if (qtree_entry_unused(&oinfo->dqi_gi, dp))
		return 0;

	return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type,
				le32_to_cpu(d->dqb_id)),
		      dquot->dq_id);
}

const struct qtree_fmt_operations ocfs2_global_ops = {
	.mem2disk_dqblk = ocfs2_global_mem2diskdqb,
	.disk2mem_dqblk = ocfs2_global_disk2memdqb,
	.is_id = ocfs2_global_is_id,
};
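
/*
 * Validate a quota block after it has been read: check the ecc stored in the
 * block trailer against the block contents.
 */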
int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
{
	struct ocfs2_disk_dqtrailer *dqt =
		ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

	trace_ocfs2_validate_quota_block((unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running. We know any error is
	 * local to this block.
	 */
	return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}
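
/*
 * Read a single quota block by physical block number, running the ecc
 * validation above on the result.
 */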
int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
				struct buffer_head **bhp)
{
	int rc;

	*bhp = NULL;
	rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, 1, bhp, 0,
			       ocfs2_validate_quota_block);
	if (rc)
		mlog_errno(rc);
	return rc;
}

/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
			 size_t len, loff_t off)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	loff_t i_size = i_size_read(gqinode);
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	struct buffer_head *bh;
	size_t toread, tocopy;
	u64 pblock = 0, pcount = 0;

	if (off > i_size)
		return 0;
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
		if (!pcount) {
			err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock,
							  &pcount, NULL);
			if (err) {
				mlog_errno(err);
				return err;
			}
		} else {
			pcount--;
			pblock++;
		}
		bh = NULL;
		err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
		if (err) {
			mlog_errno(err);
			return err;
		}
		memcpy(data, bh->b_data + offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
			  const char *data, size_t len, loff_t off)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0, new = 0, ja_type;
	struct buffer_head *bh = NULL;
	handle_t *handle = journal_current_handle();
	u64 pblock, pcount;

	if (!handle) {
		mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
		     "because transaction was not started.\n",
		     (unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
		WARN_ON(1);
		len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
	}

	if (i_size_read(gqinode) < off + len) {
		loff_t rounded_end =
				ocfs2_align_bytes_to_blocks(sb, off + len);

		/* Space is already allocated in ocfs2_acquire_dquot() */
		err = ocfs2_simple_size_update(gqinode,
					       oinfo->dqi_gqi_bh,
					       rounded_end);
		if (err < 0)
			goto out;
		new = 1;
	}
	err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock, &pcount, NULL);
	if (err) {
		mlog_errno(err);
		goto out;
	}
	/* Not rewriting whole block? */
	if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
	    !new) {
		err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
	} else {
		bh = sb_getblk(sb, pblock);
		if (!bh)
			err = -ENOMEM;
		ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
	}
	if (err) {
		mlog_errno(err);
		goto out;
	}
	lock_buffer(bh);
	if (new)
		memset(bh->b_data, 0, sb->s_blocksize);
	memcpy(bh->b_data + offset, data, len);
	flush_dcache_page(bh->b_page);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
	err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh,
				      ja_type);
	if (err < 0) {
		brelse(bh);
		goto out;
	}
	ocfs2_journal_dirty(handle, bh);
	brelse(bh);
out:
	if (err) {
		mlog_errno(err);
		return err;
	}
	inode_inc_iversion(gqinode);
	ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
	return len;
}
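
/*
 * Take the cluster lock on the global quota file inode and pin its inode
 * buffer head in dqi_gqi_bh for the duration. With ex set, the cluster lock
 * is exclusive and i_rwsem and ip_alloc_sem are taken for writing as well
 * (needed whenever the global file may be modified or resized).
 */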
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	int status;
	struct buffer_head *bh = NULL;

	status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
	if (status < 0)
		return status;
	spin_lock(&dq_data_lock);
	if (!oinfo->dqi_gqi_count++)
		oinfo->dqi_gqi_bh = bh;
	else
		WARN_ON(bh != oinfo->dqi_gqi_bh);
	spin_unlock(&dq_data_lock);
	if (ex) {
		inode_lock(oinfo->dqi_gqinode);
		down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
	} else {
		down_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
	}
	return 0;
}

void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	if (ex) {
		up_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
		inode_unlock(oinfo->dqi_gqinode);
	} else {
		up_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
	}
	ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
	brelse(oinfo->dqi_gqi_bh);
	spin_lock(&dq_data_lock);
	if (!--oinfo->dqi_gqi_count)
		oinfo->dqi_gqi_bh = NULL;
	spin_unlock(&dq_data_lock);
}

/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
	unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
					      GROUP_QUOTA_SYSTEM_INODE };
	struct ocfs2_global_disk_dqinfo dinfo;
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	u64 pcount;
	int status;

	oinfo->dqi_gi.dqi_sb = sb;
	oinfo->dqi_gi.dqi_type = type;
	ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
	oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
	oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
	oinfo->dqi_gqi_bh = NULL;
	oinfo->dqi_gqi_count = 0;

	/* Read global header */
	oinfo->dqi_gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
							 OCFS2_INVALID_SLOT);
	if (!oinfo->dqi_gqinode) {
		mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
		     type);
		status = -EINVAL;
		goto out_err;
	}

	status = ocfs2_lock_global_qf(oinfo, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out_err;
	}

	status = ocfs2_extent_map_get_blocks(oinfo->dqi_gqinode, 0, &oinfo->dqi_giblk,
					     &pcount, NULL);
	if (status < 0)
		goto out_unlock;

	status = ocfs2_qinfo_lock(oinfo, 0);
	if (status < 0)
		goto out_unlock;
	status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
				      sizeof(struct ocfs2_global_disk_dqinfo),
				      OCFS2_GLOBAL_INFO_OFF);
	ocfs2_qinfo_unlock(oinfo, 0);
	ocfs2_unlock_global_qf(oinfo, 0);
	if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
		     status);
		if (status >= 0)
			status = -EIO;
		mlog_errno(status);
		goto out_err;
	}

	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
	oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
	oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
	oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
	oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
	oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
	oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
				      OCFS2_QBLK_RESERVED_SPACE;
	oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
	INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
	schedule_delayed_work(&oinfo->dqi_sync_work,
			      msecs_to_jiffies(oinfo->dqi_syncms));

out_err:
	return status;
out_unlock:
	ocfs2_unlock_global_qf(oinfo, 0);
	mlog_errno(status);
	goto out_err;
}

/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct ocfs2_global_disk_dqinfo dinfo;
	ssize_t size;

	spin_lock(&dq_data_lock);
	info->dqi_flags &= ~DQF_INFO_DIRTY;
	dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
	dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
	spin_unlock(&dq_data_lock);
	dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
	dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
	dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
	dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
				     sizeof(struct ocfs2_global_disk_dqinfo),
				     OCFS2_GLOBAL_INFO_OFF);
	if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot write global quota info structure\n");
		if (size >= 0)
			size = -EIO;
		return size;
	}
	return 0;
}
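
/*
 * Locked wrapper around __ocfs2_global_write_info(): take dqio_sem and the
 * cluster qinfo lock exclusively, then write out the global info header.
 */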
int ocfs2_global_write_info(struct super_block *sb, int type)
{
	int err;
	struct quota_info *dqopt = sb_dqopt(sb);
	struct ocfs2_mem_dqinfo *info = dqopt->info[type].dqi_priv;

	down_write(&dqopt->dqio_sem);
	err = ocfs2_qinfo_lock(info, 1);
	if (err < 0)
		goto out_sem;
	err = __ocfs2_global_write_info(sb, type);
	ocfs2_qinfo_unlock(info, 1);
out_sem:
	up_write(&dqopt->dqio_sem);
	return err;
}

static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	/*
	 * We may need to allocate tree blocks and a leaf block but not the
	 * root block
	 */
	return oinfo->dqi_gi.dqi_qtree_depth;
}

static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
	/* We modify all the allocated blocks, tree root, info block and
	 * the inode */
	return (ocfs2_global_qinit_alloc(sb, type) + 2) *
			OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + 1;
}

/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
	int err, err2;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_id.type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_global_disk_dqblk dqblk;
	s64 spacechange, inodechange;
	time64_t olditime, oldbtime;

	err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
				   sizeof(struct ocfs2_global_disk_dqblk),
				   dquot->dq_off);
	if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
		if (err >= 0) {
			mlog(ML_ERROR, "Short read from global quota file "
				       "(%u read)\n", err);
			err = -EIO;
		}
		goto out;
	}
	/* Update space and inode usage. Also take over other information from
	 * the global quota file so that we don't overwrite any changes there. */
	spin_lock(&dquot->dq_dqb_lock);
	spacechange = dquot->dq_dqb.dqb_curspace -
					OCFS2_DQUOT(dquot)->dq_origspace;
	inodechange = dquot->dq_dqb.dqb_curinodes -
					OCFS2_DQUOT(dquot)->dq_originodes;
	olditime = dquot->dq_dqb.dqb_itime;
	oldbtime = dquot->dq_dqb.dqb_btime;
	ocfs2_global_disk2memdqb(dquot, &dqblk);
	trace_ocfs2_sync_dquot(from_kqid(&init_user_ns, dquot->dq_id),
			       dquot->dq_dqb.dqb_curspace,
			       (long long)spacechange,
			       dquot->dq_dqb.dqb_curinodes,
			       (long long)inodechange);
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curspace += spacechange;
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curinodes += inodechange;
	/* Set properly space grace time... */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
		    oldbtime > 0) {
			if (dquot->dq_dqb.dqb_btime > 0)
				dquot->dq_dqb.dqb_btime =
					min(dquot->dq_dqb.dqb_btime, oldbtime);
			else
				dquot->dq_dqb.dqb_btime = oldbtime;
		}
	} else {
		dquot->dq_dqb.dqb_btime = 0;
		clear_bit(DQ_BLKS_B, &dquot->dq_flags);
	}
	/* Set properly inode grace time... */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
		    olditime > 0) {
			if (dquot->dq_dqb.dqb_itime > 0)
				dquot->dq_dqb.dqb_itime =
					min(dquot->dq_dqb.dqb_itime, olditime);
			else
				dquot->dq_dqb.dqb_itime = olditime;
		}
	} else {
		dquot->dq_dqb.dqb_itime = 0;
		clear_bit(DQ_INODES_B, &dquot->dq_flags);
	}
	/* All information is properly updated, clear the flags */
	__clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	spin_unlock(&dquot->dq_dqb_lock);
	err = ocfs2_qinfo_lock(info, freeing);
	if (err < 0) {
		mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
			       " (type=%d, id=%u)\n", dquot->dq_id.type,
			       (unsigned)from_kqid(&init_user_ns, dquot->dq_id));
		goto out;
	}
	if (freeing)
		OCFS2_DQUOT(dquot)->dq_use_count--;
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
		err = qtree_release_dquot(&info->dqi_gi, dquot);
		if (info_dirty(sb_dqinfo(sb, type))) {
			err2 = __ocfs2_global_write_info(sb, type);
			if (!err)
				err = err2;
		}
	}
out_qlock:
	ocfs2_qinfo_unlock(info, freeing);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}

/*
 * Functions for periodic syncing of dquots with global file
 */
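
/*
 * Helper called by dquot_scan_active() for each active dquot of the given
 * type: take the global quota file lock, start a transaction, sync the dquot
 * with the global file and then write the updated local structure.
 */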
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
	handle_t *handle;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int status = 0;

	trace_ocfs2_sync_dquot_helper(from_kqid(&init_user_ns, dquot->dq_id),
				      dquot->dq_id.type,
				      type, sb->s_id);
	if (type != dquot->dq_id.type)
		goto out;
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	down_write(&sb_dqopt(sb)->dqio_sem);
	status = ocfs2_sync_dquot(dquot);
	if (status < 0)
		mlog_errno(status);
	/* We have to write local structure as well... */
	status = ocfs2_local_write_dquot(dquot);
	if (status < 0)
		mlog_errno(status);
	up_write(&sb_dqopt(sb)->dqio_sem);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	return status;
}

static void qsync_work_fn(struct work_struct *work)
{
	struct ocfs2_mem_dqinfo *oinfo = container_of(work,
						      struct ocfs2_mem_dqinfo,
						      dqi_sync_work.work);
	struct super_block *sb = oinfo->dqi_gqinode->i_sb;

	/*
	 * We have to be careful here not to deadlock on s_umount as umount
	 * disabling quotas may be in progress and it waits for this work to
	 * complete. If trylock fails, we'll do the sync next time...
	 */
	if (down_read_trylock(&sb->s_umount)) {
		dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
		up_read(&sb->s_umount);
	}
	schedule_delayed_work(&oinfo->dqi_sync_work,
			      msecs_to_jiffies(oinfo->dqi_syncms));
}

/*
 * Wrappers for generic quota functions
 */
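
/*
 * Write a dquot to the node-local quota file within its own transaction.
 * The change reaches the global file later, either through the periodic
 * sync worker or when ocfs2_mark_dquot_dirty() decides to sync immediately.
 */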
static int ocfs2_write_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	trace_ocfs2_write_dquot(from_kqid(&init_user_ns, dquot->dq_id),
				dquot->dq_id.type);

	handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	down_write(&sb_dqopt(dquot->dq_sb)->dqio_sem);
	status = ocfs2_local_write_dquot(dquot);
	up_write(&sb_dqopt(dquot->dq_sb)->dqio_sem);
	ocfs2_commit_trans(osb, handle);
out:
	return status;
}

static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	/*
	 * We modify tree, leaf block, global info, local chunk header,
	 * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
	 * accounts for inode update
	 */
	return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
	       OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
	       OCFS2_QINFO_WRITE_CREDITS +
	       OCFS2_INODE_UPDATE_CREDITS;
}
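
/*
 * Work function that drops the dquot references queued on dquot_drop_list
 * by ocfs2_release_dquot() when it runs from the downconvert thread.
 */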
void ocfs2_drop_dquot_refs(struct work_struct *work)
{
	struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
					       dquot_drop_work);
	struct llist_node *list;
	struct ocfs2_dquot *odquot, *next_odquot;

	list = llist_del_all(&osb->dquot_drop_list);
	llist_for_each_entry_safe(odquot, next_odquot, list, list) {
		/* Drop the reference we acquired in ocfs2_dquot_release() */
		dqput(&odquot->dq_dquot);
	}
}

/*
 * Called when the last reference to dquot is dropped. If we are called from
 * downconvert thread, we cannot do all the handling here because grabbing
 * quota lock could deadlock (the node holding the quota lock could need some
 * other cluster lock to proceed but with blocked downconvert thread we cannot
 * release any lock).
 */
static int ocfs2_release_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	trace_ocfs2_release_dquot(from_kqid(&init_user_ns, dquot->dq_id),
				  dquot->dq_id.type);

	mutex_lock(&dquot->dq_lock);
	/* Check whether we are not racing with some other dqget() */
	if (dquot_is_busy(dquot))
		goto out;
	/* Running from downconvert thread? Postpone quota processing to wq */
	if (current == osb->dc_task) {
		/*
		 * Grab our own reference to dquot and queue it for delayed
		 * dropping. Quota code rechecks after calling
		 * ->release_dquot() and won't free dquot structure.
		 */
		dqgrab(dquot);
		/* First entry on list -> queue work */
		if (llist_add(&OCFS2_DQUOT(dquot)->list, &osb->dquot_drop_list))
			queue_work(osb->ocfs2_wq, &osb->dquot_drop_work);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}

	status = ocfs2_global_release_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_trans;
	}
	status = ocfs2_local_release_dquot(handle, dquot);
	/*
	 * If we fail here, we cannot do much as global structure is
	 * already released. So just complain...
	 */
	if (status < 0)
		mlog_errno(status);

	/*
	 * Clear dq_off so that we search for the structure in quota file next
	 * time we acquire it. The structure might be deleted and reallocated
	 * elsewhere by another node while our dquot structure is on freelist.
	 */
	dquot->dq_off = 0;
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mutex_unlock(&dquot->dq_lock);
	if (status)
		mlog_errno(status);
	return status;
}

/*
 * Read global dquot structure from disk or create it if it does
 * not exist. Also update use count of the global structure and
 * create structure in node-local quota file.
 */
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
	int status = 0, err;
	int ex = 0;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int type = dquot->dq_id.type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = info->dqi_gqinode;
	int need_alloc = ocfs2_global_qinit_alloc(sb, type);
	handle_t *handle;

	trace_ocfs2_acquire_dquot(from_kqid(&init_user_ns, dquot->dq_id),
				  type);
	mutex_lock(&dquot->dq_lock);
	/*
	 * We need an exclusive lock, because we're going to update use count
	 * and instantiate possibly new dquot structure
	 */
	status = ocfs2_lock_global_qf(info, 1);
	if (status < 0)
		goto out;
	status = ocfs2_qinfo_lock(info, 0);
	if (status < 0)
		goto out_dq;
	/*
	 * We always want to read dquot structure from disk because we don't
	 * know what happened with it while it was on freelist.
	 */
	status = qtree_read_dquot(&info->dqi_gi, dquot);
	ocfs2_qinfo_unlock(info, 0);
	if (status < 0)
		goto out_dq;

	OCFS2_DQUOT(dquot)->dq_use_count++;
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	if (!dquot->dq_off) {	/* No real quota entry? */
		ex = 1;
		/*
		 * Add blocks to quota file before we start a transaction since
		 * locking allocators ranks above a transaction start
		 */
		WARN_ON(journal_current_handle());
		status = ocfs2_extend_no_holes(gqinode, NULL,
			i_size_read(gqinode) + (need_alloc << sb->s_blocksize_bits),
			i_size_read(gqinode));
		if (status < 0)
			goto out_dq;
	}

	handle = ocfs2_start_trans(osb,
				   ocfs2_calc_global_qinit_credits(sb, type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		goto out_dq;
	}
	status = ocfs2_qinfo_lock(info, ex);
	if (status < 0)
		goto out_trans;
	status = qtree_write_dquot(&info->dqi_gi, dquot);
	if (ex && info_dirty(sb_dqinfo(sb, type))) {
		err = __ocfs2_global_write_info(sb, type);
		if (!status)
			status = err;
	}
	ocfs2_qinfo_unlock(info, ex);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_dq:
	ocfs2_unlock_global_qf(info, 1);
	if (status < 0)
		goto out;

	status = ocfs2_create_local_dquot(dquot);
	if (status < 0)
		goto out;
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out:
	mutex_unlock(&dquot->dq_lock);
	if (status)
		mlog_errno(status);
	return status;
}
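
/*
 * Find the next allocated quota id at or after *qid in the global quota
 * tree; used by the generic quota code for the Q_GETNEXTQUOTA interface.
 */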
static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
{
	int type = qid->type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	int status = 0;

	trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
	if (!sb_has_quota_loaded(sb, type)) {
		status = -ESRCH;
		goto out;
	}
	status = ocfs2_lock_global_qf(info, 0);
	if (status < 0)
		goto out;
	status = ocfs2_qinfo_lock(info, 0);
	if (status < 0)
		goto out_global;
	status = qtree_get_next_id(&info->dqi_gi, qid);
	ocfs2_qinfo_unlock(info, 0);
out_global:
	ocfs2_unlock_global_qf(info, 0);
out:
	/*
	 * Avoid logging ENOENT since it just means there isn't next ID and
	 * ESRCH which means quota isn't enabled for the filesystem.
	 */
	if (status && status != -ENOENT && status != -ESRCH)
		mlog_errno(status);
	return status;
}
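
/*
 * Called by the generic quota code whenever a dquot changes. Usage-only
 * updates just go to the local quota file; if the administrator set limits,
 * grace times or usage via quotactl (any DQ_LASTSET_B flag) and no
 * transaction is already running, the dquot is synced to the global file
 * immediately so other nodes see the new settings sooner.
 */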
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
	unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
			     (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
			     (1 << (DQ_LASTSET_B + QIF_ITIME_B));
	int sync = 0;
	int status;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_id.type;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(sb);

	trace_ocfs2_mark_dquot_dirty(from_kqid(&init_user_ns, dquot->dq_id),
				     type);

	/* In case user set some limits, sync dquot immediately to global
	 * quota file so that information propagates quicker */
	spin_lock(&dquot->dq_dqb_lock);
	if (dquot->dq_flags & mask)
		sync = 1;
	spin_unlock(&dquot->dq_dqb_lock);
	/* This is a slight hack but we can't afford getting global quota
	 * lock if we already have a transaction started. */
	if (!sync || journal_current_handle()) {
		status = ocfs2_write_dquot(dquot);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	down_write(&sb_dqopt(sb)->dqio_sem);
	status = ocfs2_sync_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_dlock;
	}
	/* Now write updated local dquot structure */
	status = ocfs2_local_write_dquot(dquot);
out_dlock:
	up_write(&sb_dqopt(sb)->dqio_sem);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	if (status)
		mlog_errno(status);
	return status;
}

/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
	handle_t *handle;
	int status = 0;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_commit_info(sb, type);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	if (status)
		mlog_errno(status);
	return status;
}

static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
	struct ocfs2_dquot *dquot =
			kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

	if (!dquot)
		return NULL;
	return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
	kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

const struct dquot_operations ocfs2_quota_operations = {
	/* We never make dquot dirty so .write_dquot is never called */
	.acquire_dquot	= ocfs2_acquire_dquot,
	.release_dquot	= ocfs2_release_dquot,
	.mark_dirty	= ocfs2_mark_dquot_dirty,
	.write_info	= ocfs2_write_info,
	.alloc_dquot	= ocfs2_alloc_dquot,
	.destroy_dquot	= ocfs2_destroy_dquot,
	.get_next_id	= ocfs2_get_next_id,
};