xfs_iunlink_item.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020-2022, Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_ag.h"
#include "xfs_iunlink_item.h"
#include "xfs_trace.h"
#include "xfs_error.h"

struct kmem_cache	*xfs_iunlink_cache;
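
/*
 * A minimal sketch of how this cache is expected to be set up: it is only
 * declared here and created once by the mount-time cache initialisation code,
 * roughly along these lines (the cache name string is an assumption for
 * illustration, not taken from this file):
 *
 *	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
 *					      sizeof(struct xfs_iunlink_item),
 *					      0, 0, NULL);
 *	if (!xfs_iunlink_cache)
 *		return -ENOMEM;
 */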

static inline struct xfs_iunlink_item *IUL_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_iunlink_item, item);
}

static void
xfs_iunlink_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_iunlink_item	*iup = IUL_ITEM(lip);

	xfs_perag_put(iup->pag);
	kmem_cache_free(xfs_iunlink_cache, IUL_ITEM(lip));
}

static uint64_t
xfs_iunlink_item_sort(
	struct xfs_log_item	*lip)
{
	return IUL_ITEM(lip)->ip->i_ino;
}

/*
 * Look up the inode cluster buffer and log the on-disk unlinked inode change
 * we need to make.
 */
static int
xfs_iunlink_log_dinode(
	struct xfs_trans	*tp,
	struct xfs_iunlink_item	*iup)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip = iup->ip;
	struct xfs_dinode	*dip;
	struct xfs_buf		*ibp;
	int			offset;
	int			error;

	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
	if (error)
		return error;
	/*
	 * Don't log the unlinked field on stale buffers as this may be the
	 * transaction that frees the inode cluster and relogging the buffer
	 * here will incorrectly remove the stale state.
	 */
	if (ibp->b_flags & XBF_STALE)
		goto out;

	dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);

	/* Make sure the old pointer isn't garbage. */
	if (be32_to_cpu(dip->di_next_unlinked) != iup->old_agino) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
				sizeof(*dip), __this_address);
		error = -EFSCORRUPTED;
		goto out;
	}

	trace_xfs_iunlink_update_dinode(mp, iup->pag->pag_agno,
			XFS_INO_TO_AGINO(mp, ip->i_ino),
			be32_to_cpu(dip->di_next_unlinked), iup->next_agino);

	dip->di_next_unlinked = cpu_to_be32(iup->next_agino);
	offset = ip->i_imap.im_boffset +
			offsetof(struct xfs_dinode, di_next_unlinked);

	xfs_dinode_calc_crc(mp, dip);
	xfs_trans_inode_buf(tp, ibp);
	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
	return 0;
out:
	xfs_trans_brelse(tp, ibp);
	return error;
}
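
/*
 * Rough sketch of the structure being updated above: each AGI header holds
 * buckets of on-disk unlinked lists, singly linked chains of AG inode numbers
 * terminated by NULLAGINO:
 *
 *	AGI bucket -> inode A -> inode B -> NULLAGINO
 *
 * Pushing an inode onto the head of a bucket means its di_next_unlinked field
 * must point at the previous head (or NULLAGINO for an empty bucket); that
 * single 32-bit on-disk update is what xfs_iunlink_log_dinode() performs and
 * logs.
 */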

/*
 * On precommit, we grab the inode cluster buffer for the inode number we were
 * passed, then update the next unlinked field for that inode in the buffer and
 * log the buffer. This ensures that the inode cluster buffer was logged in the
 * correct order w.r.t. other inode cluster buffers. We can then remove the
 * iunlink item from the transaction and release it, as it has now served its
 * purpose.
 */
static int
xfs_iunlink_item_precommit(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_iunlink_item	*iup = IUL_ITEM(lip);
	int			error;

	error = xfs_iunlink_log_dinode(tp, iup);
	list_del(&lip->li_trans);
	xfs_iunlink_item_release(lip);
	return error;
}

static const struct xfs_item_ops xfs_iunlink_item_ops = {
	.iop_release	= xfs_iunlink_item_release,
	.iop_sort	= xfs_iunlink_item_sort,
	.iop_precommit	= xfs_iunlink_item_precommit,
};
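
/*
 * How these methods get used is easiest to see from the generic transaction
 * commit path rather than from this file. A minimal sketch, assuming the
 * usual precommit handling in the transaction code (helper names here are
 * illustrative): before the items are formatted into the CIL, the
 * transaction's item list is sorted using ->iop_sort() and ->iop_precommit()
 * is then run on every dirty item, roughly:
 *
 *	list_sort(NULL, &tp->t_items, precommit_sort_cmp);
 *	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
 *		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
 *			continue;
 *		if (lip->li_ops->iop_precommit)
 *			error = lip->li_ops->iop_precommit(tp, lip);
 *		...
 *	}
 *
 * Sorting iunlink items by inode number (iop_sort above) means concurrent
 * transactions lock and log their inode cluster buffers in a consistent
 * order, and iop_precommit then performs the buffer update and drops the item
 * from the transaction.
 */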

/*
 * Record the new on-disk unlinked list pointer for this inode by allocating
 * an iunlink log item that captures the old and new values of
 * di_next_unlinked, deferring the actual buffer update to precommit time.
 *
 * This joins the item to the transaction and marks it dirty so
 * that we don't need a separate call to do this, nor does the
 * caller need to know anything about the iunlink item.
 */
int
xfs_iunlink_log_inode(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	xfs_agino_t		next_agino)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_iunlink_item	*iup;

	ASSERT(xfs_verify_agino_or_null(pag, next_agino));
	ASSERT(xfs_verify_agino_or_null(pag, ip->i_next_unlinked));

	/*
	 * Since we're updating a linked list, we should never find that the
	 * current pointer is the same as the new value, unless we're
	 * terminating the list.
	 */
	if (ip->i_next_unlinked == next_agino) {
		if (next_agino != NULLAGINO)
			return -EFSCORRUPTED;
		return 0;
	}

	iup = kmem_cache_zalloc(xfs_iunlink_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(mp, &iup->item, XFS_LI_IUNLINK,
			  &xfs_iunlink_item_ops);

	iup->ip = ip;
	iup->next_agino = next_agino;
	iup->old_agino = ip->i_next_unlinked;

	atomic_inc(&pag->pag_ref);
	iup->pag = pag;

	xfs_trans_add_item(tp, &iup->item);
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &iup->item.li_flags);
	return 0;
}
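
/*
 * A minimal usage sketch for xfs_iunlink_log_inode(), loosely modelled on an
 * unlinked-list insert path (caller structure and variable names here are
 * assumptions, not code from this file): the caller reads the current bucket
 * head from the AGI, logs the new pointer for the inode being pushed onto the
 * list, and only then updates the in-core i_next_unlinked to match:
 *
 *	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
 *	if (next_agino != NULLAGINO) {
 *		error = xfs_iunlink_log_inode(tp, ip, pag, next_agino);
 *		if (error)
 *			return error;
 *		ip->i_next_unlinked = next_agino;
 *	}
 *	... then point the AGI bucket at this inode ...
 *
 * The deferred dinode update itself happens later, at transaction precommit,
 * via xfs_iunlink_item_precommit() above.
 */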