xfs_refcount.c

  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (C) 2016 Oracle. All Rights Reserved.
  4. * Author: Darrick J. Wong <[email protected]>
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_shared.h"
  9. #include "xfs_format.h"
  10. #include "xfs_log_format.h"
  11. #include "xfs_trans_resv.h"
  12. #include "xfs_mount.h"
  13. #include "xfs_defer.h"
  14. #include "xfs_btree.h"
  15. #include "xfs_bmap.h"
  16. #include "xfs_refcount_btree.h"
  17. #include "xfs_alloc.h"
  18. #include "xfs_errortag.h"
  19. #include "xfs_error.h"
  20. #include "xfs_trace.h"
  21. #include "xfs_trans.h"
  22. #include "xfs_bit.h"
  23. #include "xfs_refcount.h"
  24. #include "xfs_rmap.h"
  25. #include "xfs_ag.h"
  26. struct kmem_cache *xfs_refcount_intent_cache;
  27. /* Allowable refcount adjustment amounts. */
  28. enum xfs_refc_adjust_op {
  29. XFS_REFCOUNT_ADJUST_INCREASE = 1,
  30. XFS_REFCOUNT_ADJUST_DECREASE = -1,
  31. XFS_REFCOUNT_ADJUST_COW_ALLOC = 0,
  32. XFS_REFCOUNT_ADJUST_COW_FREE = -1,
  33. };
  34. STATIC int __xfs_refcount_cow_alloc(struct xfs_btree_cur *rcur,
  35. xfs_agblock_t agbno, xfs_extlen_t aglen);
  36. STATIC int __xfs_refcount_cow_free(struct xfs_btree_cur *rcur,
  37. xfs_agblock_t agbno, xfs_extlen_t aglen);
  38. /*
  39. * Look up the first record less than or equal to [bno, len] in the btree
  40. * given by cur.
  41. */
  42. int
  43. xfs_refcount_lookup_le(
  44. struct xfs_btree_cur *cur,
  45. enum xfs_refc_domain domain,
  46. xfs_agblock_t bno,
  47. int *stat)
  48. {
  49. trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno,
  50. xfs_refcount_encode_startblock(bno, domain),
  51. XFS_LOOKUP_LE);
  52. cur->bc_rec.rc.rc_startblock = bno;
  53. cur->bc_rec.rc.rc_blockcount = 0;
  54. cur->bc_rec.rc.rc_domain = domain;
  55. return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
  56. }
  57. /*
  58. * Look up the first record greater than or equal to [bno, len] in the btree
  59. * given by cur.
  60. */
  61. int
  62. xfs_refcount_lookup_ge(
  63. struct xfs_btree_cur *cur,
  64. enum xfs_refc_domain domain,
  65. xfs_agblock_t bno,
  66. int *stat)
  67. {
  68. trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno,
  69. xfs_refcount_encode_startblock(bno, domain),
  70. XFS_LOOKUP_GE);
  71. cur->bc_rec.rc.rc_startblock = bno;
  72. cur->bc_rec.rc.rc_blockcount = 0;
  73. cur->bc_rec.rc.rc_domain = domain;
  74. return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
  75. }
  76. /*
  77. * Look up the first record equal to [bno, len] in the btree
  78. * given by cur.
  79. */
  80. int
  81. xfs_refcount_lookup_eq(
  82. struct xfs_btree_cur *cur,
  83. enum xfs_refc_domain domain,
  84. xfs_agblock_t bno,
  85. int *stat)
  86. {
  87. trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno,
  88. xfs_refcount_encode_startblock(bno, domain),
  89. XFS_LOOKUP_LE);
  90. cur->bc_rec.rc.rc_startblock = bno;
  91. cur->bc_rec.rc.rc_blockcount = 0;
  92. cur->bc_rec.rc.rc_domain = domain;
  93. return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
  94. }
  95. /* Convert on-disk record to in-core format. */
  96. void
  97. xfs_refcount_btrec_to_irec(
  98. const union xfs_btree_rec *rec,
  99. struct xfs_refcount_irec *irec)
  100. {
  101. uint32_t start;
  102. start = be32_to_cpu(rec->refc.rc_startblock);
  103. if (start & XFS_REFC_COWFLAG) {
  104. start &= ~XFS_REFC_COWFLAG;
  105. irec->rc_domain = XFS_REFC_DOMAIN_COW;
  106. } else {
  107. irec->rc_domain = XFS_REFC_DOMAIN_SHARED;
  108. }
  109. irec->rc_startblock = start;
  110. irec->rc_blockcount = be32_to_cpu(rec->refc.rc_blockcount);
  111. irec->rc_refcount = be32_to_cpu(rec->refc.rc_refcount);
  112. }
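/*
 * Worked example (illustrative only): an on-disk record whose rc_startblock
 * has the XFS_REFC_COWFLAG bit set, say the flag ORed with block 100, decodes
 * to rc_domain = XFS_REFC_DOMAIN_COW and rc_startblock = 100; the same value
 * without the flag bit decodes to XFS_REFC_DOMAIN_SHARED.  The flag exists
 * only in the on-disk encoding; the in-core irec carries the domain in a
 * separate field.
 */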
  113. /*
  114. * Get the data from the pointed-to record.
  115. */
  116. int
  117. xfs_refcount_get_rec(
  118. struct xfs_btree_cur *cur,
  119. struct xfs_refcount_irec *irec,
  120. int *stat)
  121. {
  122. struct xfs_mount *mp = cur->bc_mp;
  123. struct xfs_perag *pag = cur->bc_ag.pag;
  124. union xfs_btree_rec *rec;
  125. int error;
  126. error = xfs_btree_get_rec(cur, &rec, stat);
  127. if (error || !*stat)
  128. return error;
  129. xfs_refcount_btrec_to_irec(rec, irec);
  130. if (irec->rc_blockcount == 0 || irec->rc_blockcount > MAXREFCEXTLEN)
  131. goto out_bad_rec;
  132. if (!xfs_refcount_check_domain(irec))
  133. goto out_bad_rec;
  134. /* check for valid extent range, including overflow */
  135. if (!xfs_verify_agbext(pag, irec->rc_startblock, irec->rc_blockcount))
  136. goto out_bad_rec;
  137. if (irec->rc_refcount == 0 || irec->rc_refcount > MAXREFCOUNT)
  138. goto out_bad_rec;
  139. trace_xfs_refcount_get(cur->bc_mp, pag->pag_agno, irec);
  140. return 0;
  141. out_bad_rec:
  142. xfs_warn(mp,
  143. "Refcount BTree record corruption in AG %d detected!",
  144. pag->pag_agno);
  145. xfs_warn(mp,
  146. "Start block 0x%x, block count 0x%x, references 0x%x",
  147. irec->rc_startblock, irec->rc_blockcount, irec->rc_refcount);
  148. return -EFSCORRUPTED;
  149. }
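/*
 * Typical use of the lookup/get helpers above (illustrative sketch, not
 * compiled; this pattern recurs throughout the rest of this file):
 *
 *	struct xfs_refcount_irec	irec;
 *	int				found;
 *
 *	error = xfs_refcount_lookup_le(cur, XFS_REFC_DOMAIN_SHARED,
 *			agbno, &found);
 *	if (!error && found)
 *		error = xfs_refcount_get_rec(cur, &irec, &found);
 *
 * On success with found == 1, irec describes the record at or below agbno
 * in the shared domain.
 */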
  150. /*
  151. * Update the record referred to by cur to the value given
  152. * by [bno, len, refcount].
  153. * This either works (return 0) or gets an EFSCORRUPTED error.
  154. */
  155. STATIC int
  156. xfs_refcount_update(
  157. struct xfs_btree_cur *cur,
  158. struct xfs_refcount_irec *irec)
  159. {
  160. union xfs_btree_rec rec;
  161. uint32_t start;
  162. int error;
  163. trace_xfs_refcount_update(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
  164. start = xfs_refcount_encode_startblock(irec->rc_startblock,
  165. irec->rc_domain);
  166. rec.refc.rc_startblock = cpu_to_be32(start);
  167. rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
  168. rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
  169. error = xfs_btree_update(cur, &rec);
  170. if (error)
  171. trace_xfs_refcount_update_error(cur->bc_mp,
  172. cur->bc_ag.pag->pag_agno, error, _RET_IP_);
  173. return error;
  174. }
  175. /*
  176. * Insert the record referred to by cur to the value given
  177. * by [bno, len, refcount].
  178. * This either works (return 0) or gets an EFSCORRUPTED error.
  179. */
  180. int
  181. xfs_refcount_insert(
  182. struct xfs_btree_cur *cur,
  183. struct xfs_refcount_irec *irec,
  184. int *i)
  185. {
  186. int error;
  187. trace_xfs_refcount_insert(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
  188. cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
  189. cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
  190. cur->bc_rec.rc.rc_refcount = irec->rc_refcount;
  191. cur->bc_rec.rc.rc_domain = irec->rc_domain;
  192. error = xfs_btree_insert(cur, i);
  193. if (error)
  194. goto out_error;
  195. if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
  196. error = -EFSCORRUPTED;
  197. goto out_error;
  198. }
  199. out_error:
  200. if (error)
  201. trace_xfs_refcount_insert_error(cur->bc_mp,
  202. cur->bc_ag.pag->pag_agno, error, _RET_IP_);
  203. return error;
  204. }
  205. /*
  206. * Remove the record referred to by cur, then set the pointer to the spot
  207. * where the record could be re-inserted, in case we want to increment or
  208. * decrement the cursor.
  209. * This either works (return 0) or gets an EFSCORRUPTED error.
  210. */
  211. STATIC int
  212. xfs_refcount_delete(
  213. struct xfs_btree_cur *cur,
  214. int *i)
  215. {
  216. struct xfs_refcount_irec irec;
  217. int found_rec;
  218. int error;
  219. error = xfs_refcount_get_rec(cur, &irec, &found_rec);
  220. if (error)
  221. goto out_error;
  222. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  223. error = -EFSCORRUPTED;
  224. goto out_error;
  225. }
  226. trace_xfs_refcount_delete(cur->bc_mp, cur->bc_ag.pag->pag_agno, &irec);
  227. error = xfs_btree_delete(cur, i);
  228. if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
  229. error = -EFSCORRUPTED;
  230. goto out_error;
  231. }
  232. if (error)
  233. goto out_error;
  234. error = xfs_refcount_lookup_ge(cur, irec.rc_domain, irec.rc_startblock,
  235. &found_rec);
  236. out_error:
  237. if (error)
  238. trace_xfs_refcount_delete_error(cur->bc_mp,
  239. cur->bc_ag.pag->pag_agno, error, _RET_IP_);
  240. return error;
  241. }
  242. /*
  243. * Adjusting the Reference Count
  244. *
  245. * As stated elsewhere, the reference count btree (refcbt) stores
  246. * >1 reference counts for extents of physical blocks. In this
  247. * operation, we're either raising or lowering the reference count of
  248. * some subrange stored in the tree:
  249. *
250. *      <------ adjustment range ------>
251. * ----+   +---+-----+ +--+--------+---------
252. *  2  |   | 3 |  4  | |17|   55   |   10
253. * ----+   +---+-----+ +--+--------+---------
254. * X axis is the physical block number;
255. * reference counts are the numbers inside the rectangles
  256. *
  257. * The first thing we need to do is to ensure that there are no
  258. * refcount extents crossing either boundary of the range to be
  259. * adjusted. For any extent that does cross a boundary, split it into
  260. * two extents so that we can increment the refcount of one of the
  261. * pieces later:
  262. *
263. *      <------ adjustment range ------>
264. * ----+   +---+-----+ +--+--------+----+----
265. *  2  |   | 3 |  2  | |17|   55   | 10 | 10
266. * ----+   +---+-----+ +--+--------+----+----
  267. *
  268. * For this next step, let's assume that all the physical blocks in
  269. * the adjustment range are mapped to a file and are therefore in use
  270. * at least once. Therefore, we can infer that any gap in the
  271. * refcount tree within the adjustment range represents a physical
  272. * extent with refcount == 1:
  273. *
274. *      <------ adjustment range ------>
275. * ----+---+---+-----+-+--+--------+----+----
276. *  2  |"1"| 3 |  2  |1|17|   55   | 10 | 10
277. * ----+---+---+-----+-+--+--------+----+----
278. *      ^
  279. *
  280. * For each extent that falls within the interval range, figure out
  281. * which extent is to the left or the right of that extent. Now we
  282. * have a left, current, and right extent. If the new reference count
  283. * of the center extent enables us to merge left, center, and right
  284. * into one record covering all three, do so. If the center extent is
  285. * at the left end of the range, abuts the left extent, and its new
  286. * reference count matches the left extent's record, then merge them.
  287. * If the center extent is at the right end of the range, abuts the
  288. * right extent, and the reference counts match, merge those. In the
  289. * example, we can left merge (assuming an increment operation):
  290. *
291. *      <------ adjustment range ------>
292. * --------+---+-----+-+--+--------+----+----
293. *     2   | 3 |  2  |1|17|   55   | 10 | 10
294. * --------+---+-----+-+--+--------+----+----
295. *          ^
  296. *
  297. * For all other extents within the range, adjust the reference count
  298. * or delete it if the refcount falls below 2. If we were
  299. * incrementing, the end result looks like this:
  300. *
301. *      <------ adjustment range ------>
302. * --------+---+-----+-+--+--------+----+----
303. *     2   | 4 |  3  |2|18|   56   | 11 | 10
304. * --------+---+-----+-+--+--------+----+----
  305. *
  306. * The result of a decrement operation looks as such:
  307. *
308. *      <------ adjustment range ------>
309. * ----+   +---+       +--+--------+----+----
310. *  2  |   | 2 |       |16|   54   |  9 | 10
311. * ----+   +---+       +--+--------+----+----
312. *      DDDD    111111DD
  313. *
  314. * The blocks marked "D" are freed; the blocks marked "1" are only
  315. * referenced once and therefore the record is removed from the
  316. * refcount btree.
  317. */
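/*
 * Illustrative sketch of the pipeline described above (not compiled;
 * xfs_refcount_adjust() below is the real driver and does the error
 * handling omitted here):
 *
 *	bool	sc;
 *
 *	// 1. Split any record crossing either end of the range.
 *	xfs_refcount_split_extent(cur, dom, agbno, &sc);
 *	xfs_refcount_split_extent(cur, dom, agbno + aglen, &sc);
 *
 *	// 2. Merge with the bordering records where the adjusted
 *	//    refcounts would match; this may shrink agbno/aglen.
 *	xfs_refcount_merge_extents(cur, dom, &agbno, &aglen, adj, &sc);
 *
 *	// 3. Apply the +1/-1 adjustment to whatever remains inside
 *	//    [agbno, agbno + aglen).
 *	xfs_refcount_adjust_extents(cur, &agbno, &aglen, adj);
 */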
  318. /* Next block after this extent. */
  319. static inline xfs_agblock_t
  320. xfs_refc_next(
  321. struct xfs_refcount_irec *rc)
  322. {
  323. return rc->rc_startblock + rc->rc_blockcount;
  324. }
  325. /*
  326. * Split a refcount extent that crosses agbno.
  327. */
  328. STATIC int
  329. xfs_refcount_split_extent(
  330. struct xfs_btree_cur *cur,
  331. enum xfs_refc_domain domain,
  332. xfs_agblock_t agbno,
  333. bool *shape_changed)
  334. {
  335. struct xfs_refcount_irec rcext, tmp;
  336. int found_rec;
  337. int error;
  338. *shape_changed = false;
  339. error = xfs_refcount_lookup_le(cur, domain, agbno, &found_rec);
  340. if (error)
  341. goto out_error;
  342. if (!found_rec)
  343. return 0;
  344. error = xfs_refcount_get_rec(cur, &rcext, &found_rec);
  345. if (error)
  346. goto out_error;
  347. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  348. error = -EFSCORRUPTED;
  349. goto out_error;
  350. }
  351. if (rcext.rc_domain != domain)
  352. return 0;
  353. if (rcext.rc_startblock == agbno || xfs_refc_next(&rcext) <= agbno)
  354. return 0;
  355. *shape_changed = true;
  356. trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
  357. &rcext, agbno);
  358. /* Establish the right extent. */
  359. tmp = rcext;
  360. tmp.rc_startblock = agbno;
  361. tmp.rc_blockcount -= (agbno - rcext.rc_startblock);
  362. error = xfs_refcount_update(cur, &tmp);
  363. if (error)
  364. goto out_error;
  365. /* Insert the left extent. */
  366. tmp = rcext;
  367. tmp.rc_blockcount = agbno - rcext.rc_startblock;
  368. error = xfs_refcount_insert(cur, &tmp, &found_rec);
  369. if (error)
  370. goto out_error;
  371. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  372. error = -EFSCORRUPTED;
  373. goto out_error;
  374. }
  375. return error;
  376. out_error:
  377. trace_xfs_refcount_split_extent_error(cur->bc_mp,
  378. cur->bc_ag.pag->pag_agno, error, _RET_IP_);
  379. return error;
  380. }
  381. /*
  382. * Merge the left, center, and right extents.
  383. */
  384. STATIC int
  385. xfs_refcount_merge_center_extents(
  386. struct xfs_btree_cur *cur,
  387. struct xfs_refcount_irec *left,
  388. struct xfs_refcount_irec *center,
  389. struct xfs_refcount_irec *right,
  390. unsigned long long extlen,
  391. xfs_extlen_t *aglen)
  392. {
  393. int error;
  394. int found_rec;
  395. trace_xfs_refcount_merge_center_extents(cur->bc_mp,
  396. cur->bc_ag.pag->pag_agno, left, center, right);
  397. ASSERT(left->rc_domain == center->rc_domain);
  398. ASSERT(right->rc_domain == center->rc_domain);
  399. /*
  400. * Make sure the center and right extents are not in the btree.
  401. * If the center extent was synthesized, the first delete call
  402. * removes the right extent and we skip the second deletion.
  403. * If center and right were in the btree, then the first delete
  404. * call removes the center and the second one removes the right
  405. * extent.
  406. */
  407. error = xfs_refcount_lookup_ge(cur, center->rc_domain,
  408. center->rc_startblock, &found_rec);
  409. if (error)
  410. goto out_error;
  411. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  412. error = -EFSCORRUPTED;
  413. goto out_error;
  414. }
  415. error = xfs_refcount_delete(cur, &found_rec);
  416. if (error)
  417. goto out_error;
  418. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  419. error = -EFSCORRUPTED;
  420. goto out_error;
  421. }
  422. if (center->rc_refcount > 1) {
  423. error = xfs_refcount_delete(cur, &found_rec);
  424. if (error)
  425. goto out_error;
  426. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  427. error = -EFSCORRUPTED;
  428. goto out_error;
  429. }
  430. }
  431. /* Enlarge the left extent. */
  432. error = xfs_refcount_lookup_le(cur, left->rc_domain,
  433. left->rc_startblock, &found_rec);
  434. if (error)
  435. goto out_error;
  436. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  437. error = -EFSCORRUPTED;
  438. goto out_error;
  439. }
  440. left->rc_blockcount = extlen;
  441. error = xfs_refcount_update(cur, left);
  442. if (error)
  443. goto out_error;
  444. *aglen = 0;
  445. return error;
  446. out_error:
  447. trace_xfs_refcount_merge_center_extents_error(cur->bc_mp,
  448. cur->bc_ag.pag->pag_agno, error, _RET_IP_);
  449. return error;
  450. }
  451. /*
  452. * Merge with the left extent.
  453. */
  454. STATIC int
  455. xfs_refcount_merge_left_extent(
  456. struct xfs_btree_cur *cur,
  457. struct xfs_refcount_irec *left,
  458. struct xfs_refcount_irec *cleft,
  459. xfs_agblock_t *agbno,
  460. xfs_extlen_t *aglen)
  461. {
  462. int error;
  463. int found_rec;
  464. trace_xfs_refcount_merge_left_extent(cur->bc_mp,
  465. cur->bc_ag.pag->pag_agno, left, cleft);
  466. ASSERT(left->rc_domain == cleft->rc_domain);
  467. /* If the extent at agbno (cleft) wasn't synthesized, remove it. */
  468. if (cleft->rc_refcount > 1) {
  469. error = xfs_refcount_lookup_le(cur, cleft->rc_domain,
  470. cleft->rc_startblock, &found_rec);
  471. if (error)
  472. goto out_error;
  473. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  474. error = -EFSCORRUPTED;
  475. goto out_error;
  476. }
  477. error = xfs_refcount_delete(cur, &found_rec);
  478. if (error)
  479. goto out_error;
  480. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  481. error = -EFSCORRUPTED;
  482. goto out_error;
  483. }
  484. }
  485. /* Enlarge the left extent. */
  486. error = xfs_refcount_lookup_le(cur, left->rc_domain,
  487. left->rc_startblock, &found_rec);
  488. if (error)
  489. goto out_error;
  490. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  491. error = -EFSCORRUPTED;
  492. goto out_error;
  493. }
  494. left->rc_blockcount += cleft->rc_blockcount;
  495. error = xfs_refcount_update(cur, left);
  496. if (error)
  497. goto out_error;
  498. *agbno += cleft->rc_blockcount;
  499. *aglen -= cleft->rc_blockcount;
  500. return error;
  501. out_error:
  502. trace_xfs_refcount_merge_left_extent_error(cur->bc_mp,
  503. cur->bc_ag.pag->pag_agno, error, _RET_IP_);
  504. return error;
  505. }
  506. /*
  507. * Merge with the right extent.
  508. */
  509. STATIC int
  510. xfs_refcount_merge_right_extent(
  511. struct xfs_btree_cur *cur,
  512. struct xfs_refcount_irec *right,
  513. struct xfs_refcount_irec *cright,
  514. xfs_extlen_t *aglen)
  515. {
  516. int error;
  517. int found_rec;
  518. trace_xfs_refcount_merge_right_extent(cur->bc_mp,
  519. cur->bc_ag.pag->pag_agno, cright, right);
  520. ASSERT(right->rc_domain == cright->rc_domain);
  521. /*
  522. * If the extent ending at agbno+aglen (cright) wasn't synthesized,
  523. * remove it.
  524. */
  525. if (cright->rc_refcount > 1) {
  526. error = xfs_refcount_lookup_le(cur, cright->rc_domain,
  527. cright->rc_startblock, &found_rec);
  528. if (error)
  529. goto out_error;
  530. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  531. error = -EFSCORRUPTED;
  532. goto out_error;
  533. }
  534. error = xfs_refcount_delete(cur, &found_rec);
  535. if (error)
  536. goto out_error;
  537. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  538. error = -EFSCORRUPTED;
  539. goto out_error;
  540. }
  541. }
  542. /* Enlarge the right extent. */
  543. error = xfs_refcount_lookup_le(cur, right->rc_domain,
  544. right->rc_startblock, &found_rec);
  545. if (error)
  546. goto out_error;
  547. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  548. error = -EFSCORRUPTED;
  549. goto out_error;
  550. }
  551. right->rc_startblock -= cright->rc_blockcount;
  552. right->rc_blockcount += cright->rc_blockcount;
  553. error = xfs_refcount_update(cur, right);
  554. if (error)
  555. goto out_error;
  556. *aglen -= cright->rc_blockcount;
  557. return error;
  558. out_error:
  559. trace_xfs_refcount_merge_right_extent_error(cur->bc_mp,
  560. cur->bc_ag.pag->pag_agno, error, _RET_IP_);
  561. return error;
  562. }
  563. /*
  564. * Find the left extent and the one after it (cleft). This function assumes
  565. * that we've already split any extent crossing agbno.
  566. */
  567. STATIC int
  568. xfs_refcount_find_left_extents(
  569. struct xfs_btree_cur *cur,
  570. struct xfs_refcount_irec *left,
  571. struct xfs_refcount_irec *cleft,
  572. enum xfs_refc_domain domain,
  573. xfs_agblock_t agbno,
  574. xfs_extlen_t aglen)
  575. {
  576. struct xfs_refcount_irec tmp;
  577. int error;
  578. int found_rec;
  579. left->rc_startblock = cleft->rc_startblock = NULLAGBLOCK;
  580. error = xfs_refcount_lookup_le(cur, domain, agbno - 1, &found_rec);
  581. if (error)
  582. goto out_error;
  583. if (!found_rec)
  584. return 0;
  585. error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
  586. if (error)
  587. goto out_error;
  588. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  589. error = -EFSCORRUPTED;
  590. goto out_error;
  591. }
  592. if (tmp.rc_domain != domain)
  593. return 0;
  594. if (xfs_refc_next(&tmp) != agbno)
  595. return 0;
  596. /* We have a left extent; retrieve (or invent) the next right one */
  597. *left = tmp;
  598. error = xfs_btree_increment(cur, 0, &found_rec);
  599. if (error)
  600. goto out_error;
  601. if (found_rec) {
  602. error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
  603. if (error)
  604. goto out_error;
  605. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  606. error = -EFSCORRUPTED;
  607. goto out_error;
  608. }
  609. if (tmp.rc_domain != domain)
  610. goto not_found;
  611. /* if tmp starts at the end of our range, just use that */
  612. if (tmp.rc_startblock == agbno)
  613. *cleft = tmp;
  614. else {
  615. /*
  616. * There's a gap in the refcntbt at the start of the
  617. * range we're interested in (refcount == 1) so
  618. * synthesize the implied extent and pass it back.
  619. * We assume here that the agbno/aglen range was
  620. * passed in from a data fork extent mapping and
  621. * therefore is allocated to exactly one owner.
  622. */
  623. cleft->rc_startblock = agbno;
  624. cleft->rc_blockcount = min(aglen,
  625. tmp.rc_startblock - agbno);
  626. cleft->rc_refcount = 1;
  627. cleft->rc_domain = domain;
  628. }
  629. } else {
  630. not_found:
  631. /*
  632. * No extents, so pretend that there's one covering the whole
  633. * range.
  634. */
  635. cleft->rc_startblock = agbno;
  636. cleft->rc_blockcount = aglen;
  637. cleft->rc_refcount = 1;
  638. cleft->rc_domain = domain;
  639. }
  640. trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
  641. left, cleft, agbno);
  642. return error;
  643. out_error:
  644. trace_xfs_refcount_find_left_extent_error(cur->bc_mp,
  645. cur->bc_ag.pag->pag_agno, error, _RET_IP_);
  646. return error;
  647. }
  648. /*
  649. * Find the right extent and the one before it (cright). This function
  650. * assumes that we've already split any extents crossing agbno + aglen.
  651. */
  652. STATIC int
  653. xfs_refcount_find_right_extents(
  654. struct xfs_btree_cur *cur,
  655. struct xfs_refcount_irec *right,
  656. struct xfs_refcount_irec *cright,
  657. enum xfs_refc_domain domain,
  658. xfs_agblock_t agbno,
  659. xfs_extlen_t aglen)
  660. {
  661. struct xfs_refcount_irec tmp;
  662. int error;
  663. int found_rec;
  664. right->rc_startblock = cright->rc_startblock = NULLAGBLOCK;
  665. error = xfs_refcount_lookup_ge(cur, domain, agbno + aglen, &found_rec);
  666. if (error)
  667. goto out_error;
  668. if (!found_rec)
  669. return 0;
  670. error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
  671. if (error)
  672. goto out_error;
  673. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  674. error = -EFSCORRUPTED;
  675. goto out_error;
  676. }
  677. if (tmp.rc_domain != domain)
  678. return 0;
  679. if (tmp.rc_startblock != agbno + aglen)
  680. return 0;
  681. /* We have a right extent; retrieve (or invent) the next left one */
  682. *right = tmp;
  683. error = xfs_btree_decrement(cur, 0, &found_rec);
  684. if (error)
  685. goto out_error;
  686. if (found_rec) {
  687. error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
  688. if (error)
  689. goto out_error;
  690. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  691. error = -EFSCORRUPTED;
  692. goto out_error;
  693. }
  694. if (tmp.rc_domain != domain)
  695. goto not_found;
  696. /* if tmp ends at the end of our range, just use that */
  697. if (xfs_refc_next(&tmp) == agbno + aglen)
  698. *cright = tmp;
  699. else {
  700. /*
  701. * There's a gap in the refcntbt at the end of the
  702. * range we're interested in (refcount == 1) so
  703. * create the implied extent and pass it back.
  704. * We assume here that the agbno/aglen range was
  705. * passed in from a data fork extent mapping and
  706. * therefore is allocated to exactly one owner.
  707. */
  708. cright->rc_startblock = max(agbno, xfs_refc_next(&tmp));
  709. cright->rc_blockcount = right->rc_startblock -
  710. cright->rc_startblock;
  711. cright->rc_refcount = 1;
  712. cright->rc_domain = domain;
  713. }
  714. } else {
  715. not_found:
  716. /*
  717. * No extents, so pretend that there's one covering the whole
  718. * range.
  719. */
  720. cright->rc_startblock = agbno;
  721. cright->rc_blockcount = aglen;
  722. cright->rc_refcount = 1;
  723. cright->rc_domain = domain;
  724. }
  725. trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno,
  726. cright, right, agbno + aglen);
  727. return error;
  728. out_error:
  729. trace_xfs_refcount_find_right_extent_error(cur->bc_mp,
  730. cur->bc_ag.pag->pag_agno, error, _RET_IP_);
  731. return error;
  732. }
  733. /* Is this extent valid? */
  734. static inline bool
  735. xfs_refc_valid(
  736. struct xfs_refcount_irec *rc)
  737. {
  738. return rc->rc_startblock != NULLAGBLOCK;
  739. }
  740. /*
  741. * Try to merge with any extents on the boundaries of the adjustment range.
  742. */
  743. STATIC int
  744. xfs_refcount_merge_extents(
  745. struct xfs_btree_cur *cur,
  746. enum xfs_refc_domain domain,
  747. xfs_agblock_t *agbno,
  748. xfs_extlen_t *aglen,
  749. enum xfs_refc_adjust_op adjust,
  750. bool *shape_changed)
  751. {
  752. struct xfs_refcount_irec left = {0}, cleft = {0};
  753. struct xfs_refcount_irec cright = {0}, right = {0};
  754. int error;
  755. unsigned long long ulen;
  756. bool cequal;
  757. *shape_changed = false;
  758. /*
  759. * Find the extent just below agbno [left], just above agbno [cleft],
  760. * just below (agbno + aglen) [cright], and just above (agbno + aglen)
  761. * [right].
  762. */
  763. error = xfs_refcount_find_left_extents(cur, &left, &cleft, domain,
  764. *agbno, *aglen);
  765. if (error)
  766. return error;
  767. error = xfs_refcount_find_right_extents(cur, &right, &cright, domain,
  768. *agbno, *aglen);
  769. if (error)
  770. return error;
  771. /* No left or right extent to merge; exit. */
  772. if (!xfs_refc_valid(&left) && !xfs_refc_valid(&right))
  773. return 0;
  774. cequal = (cleft.rc_startblock == cright.rc_startblock) &&
  775. (cleft.rc_blockcount == cright.rc_blockcount);
  776. /* Try to merge left, cleft, and right. cleft must == cright. */
  777. ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount +
  778. right.rc_blockcount;
  779. if (xfs_refc_valid(&left) && xfs_refc_valid(&right) &&
  780. xfs_refc_valid(&cleft) && xfs_refc_valid(&cright) && cequal &&
  781. left.rc_refcount == cleft.rc_refcount + adjust &&
  782. right.rc_refcount == cleft.rc_refcount + adjust &&
  783. ulen < MAXREFCEXTLEN) {
  784. *shape_changed = true;
  785. return xfs_refcount_merge_center_extents(cur, &left, &cleft,
  786. &right, ulen, aglen);
  787. }
  788. /* Try to merge left and cleft. */
  789. ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount;
  790. if (xfs_refc_valid(&left) && xfs_refc_valid(&cleft) &&
  791. left.rc_refcount == cleft.rc_refcount + adjust &&
  792. ulen < MAXREFCEXTLEN) {
  793. *shape_changed = true;
  794. error = xfs_refcount_merge_left_extent(cur, &left, &cleft,
  795. agbno, aglen);
  796. if (error)
  797. return error;
  798. /*
  799. * If we just merged left + cleft and cleft == cright,
  800. * we no longer have a cright to merge with right. We're done.
  801. */
  802. if (cequal)
  803. return 0;
  804. }
  805. /* Try to merge cright and right. */
  806. ulen = (unsigned long long)right.rc_blockcount + cright.rc_blockcount;
  807. if (xfs_refc_valid(&right) && xfs_refc_valid(&cright) &&
  808. right.rc_refcount == cright.rc_refcount + adjust &&
  809. ulen < MAXREFCEXTLEN) {
  810. *shape_changed = true;
  811. return xfs_refcount_merge_right_extent(cur, &right, &cright,
  812. aglen);
  813. }
  814. return 0;
  815. }
  816. /*
  817. * XXX: This is a pretty hand-wavy estimate. The penalty for guessing
  818. * true incorrectly is a shutdown FS; the penalty for guessing false
  819. * incorrectly is more transaction rolls than might be necessary.
  820. * Be conservative here.
  821. */
  822. static bool
  823. xfs_refcount_still_have_space(
  824. struct xfs_btree_cur *cur)
  825. {
  826. unsigned long overhead;
  827. /*
  828. * Worst case estimate: full splits of the free space and rmap btrees
  829. * to handle each of the shape changes to the refcount btree.
  830. */
  831. overhead = xfs_allocfree_block_count(cur->bc_mp,
  832. cur->bc_ag.refc.shape_changes);
  833. overhead += cur->bc_mp->m_refc_maxlevels;
  834. overhead *= cur->bc_mp->m_sb.sb_blocksize;
  835. /*
  836. * Only allow 2 refcount extent updates per transaction if the
  837. * refcount continue update "error" has been injected.
  838. */
  839. if (cur->bc_ag.refc.nr_ops > 2 &&
  840. XFS_TEST_ERROR(false, cur->bc_mp,
  841. XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
  842. return false;
  843. if (cur->bc_ag.refc.nr_ops == 0)
  844. return true;
  845. else if (overhead > cur->bc_tp->t_log_res)
  846. return false;
  847. return cur->bc_tp->t_log_res - overhead >
  848. cur->bc_ag.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
  849. }
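/*
 * Expressed as a formula (sketch of the checks above): keep going only while
 *
 *	t_log_res - (xfs_allocfree_block_count(mp, shape_changes) +
 *		     m_refc_maxlevels) * sb_blocksize
 *			> nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD
 *
 * i.e. the remaining log reservation must still cover the worst-case btree
 * splits plus the per-update overhead of the work already queued.
 */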
  850. /*
  851. * Adjust the refcounts of middle extents. At this point we should have
  852. * split extents that crossed the adjustment range; merged with adjacent
  853. * extents; and updated agbno/aglen to reflect the merges. Therefore,
  854. * all we have to do is update the extents inside [agbno, agbno + aglen].
  855. */
  856. STATIC int
  857. xfs_refcount_adjust_extents(
  858. struct xfs_btree_cur *cur,
  859. xfs_agblock_t *agbno,
  860. xfs_extlen_t *aglen,
  861. enum xfs_refc_adjust_op adj)
  862. {
  863. struct xfs_refcount_irec ext, tmp;
  864. int error;
  865. int found_rec, found_tmp;
  866. xfs_fsblock_t fsbno;
  867. /* Merging did all the work already. */
  868. if (*aglen == 0)
  869. return 0;
  870. error = xfs_refcount_lookup_ge(cur, XFS_REFC_DOMAIN_SHARED, *agbno,
  871. &found_rec);
  872. if (error)
  873. goto out_error;
  874. while (*aglen > 0 && xfs_refcount_still_have_space(cur)) {
  875. error = xfs_refcount_get_rec(cur, &ext, &found_rec);
  876. if (error)
  877. goto out_error;
  878. if (!found_rec || ext.rc_domain != XFS_REFC_DOMAIN_SHARED) {
  879. ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
  880. ext.rc_blockcount = 0;
  881. ext.rc_refcount = 0;
  882. ext.rc_domain = XFS_REFC_DOMAIN_SHARED;
  883. }
  884. /*
  885. * Deal with a hole in the refcount tree; if a file maps to
  886. * these blocks and there's no refcountbt record, pretend that
  887. * there is one with refcount == 1.
  888. */
  889. if (ext.rc_startblock != *agbno) {
  890. tmp.rc_startblock = *agbno;
  891. tmp.rc_blockcount = min(*aglen,
  892. ext.rc_startblock - *agbno);
  893. tmp.rc_refcount = 1 + adj;
  894. tmp.rc_domain = XFS_REFC_DOMAIN_SHARED;
  895. trace_xfs_refcount_modify_extent(cur->bc_mp,
  896. cur->bc_ag.pag->pag_agno, &tmp);
  897. /*
  898. * Either cover the hole (increment) or
  899. * delete the range (decrement).
  900. */
  901. cur->bc_ag.refc.nr_ops++;
  902. if (tmp.rc_refcount) {
  903. error = xfs_refcount_insert(cur, &tmp,
  904. &found_tmp);
  905. if (error)
  906. goto out_error;
  907. if (XFS_IS_CORRUPT(cur->bc_mp,
  908. found_tmp != 1)) {
  909. error = -EFSCORRUPTED;
  910. goto out_error;
  911. }
  912. } else {
  913. fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
  914. cur->bc_ag.pag->pag_agno,
  915. tmp.rc_startblock);
  916. xfs_free_extent_later(cur->bc_tp, fsbno,
  917. tmp.rc_blockcount, NULL);
  918. }
  919. (*agbno) += tmp.rc_blockcount;
  920. (*aglen) -= tmp.rc_blockcount;
  921. /* Stop if there's nothing left to modify */
  922. if (*aglen == 0 || !xfs_refcount_still_have_space(cur))
  923. break;
  924. /* Move the cursor to the start of ext. */
  925. error = xfs_refcount_lookup_ge(cur,
  926. XFS_REFC_DOMAIN_SHARED, *agbno,
  927. &found_rec);
  928. if (error)
  929. goto out_error;
  930. }
  931. /*
  932. * A previous step trimmed agbno/aglen such that the end of the
  933. * range would not be in the middle of the record. If this is
  934. * no longer the case, something is seriously wrong with the
  935. * btree. Make sure we never feed the synthesized record into
  936. * the processing loop below.
  937. */
  938. if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount == 0) ||
  939. XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount > *aglen)) {
  940. error = -EFSCORRUPTED;
  941. goto out_error;
  942. }
  943. /*
  944. * Adjust the reference count and either update the tree
  945. * (incr) or free the blocks (decr).
  946. */
  947. if (ext.rc_refcount == MAXREFCOUNT)
  948. goto skip;
  949. ext.rc_refcount += adj;
  950. trace_xfs_refcount_modify_extent(cur->bc_mp,
  951. cur->bc_ag.pag->pag_agno, &ext);
  952. cur->bc_ag.refc.nr_ops++;
  953. if (ext.rc_refcount > 1) {
  954. error = xfs_refcount_update(cur, &ext);
  955. if (error)
  956. goto out_error;
  957. } else if (ext.rc_refcount == 1) {
  958. error = xfs_refcount_delete(cur, &found_rec);
  959. if (error)
  960. goto out_error;
  961. if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
  962. error = -EFSCORRUPTED;
  963. goto out_error;
  964. }
  965. goto advloop;
  966. } else {
  967. fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
  968. cur->bc_ag.pag->pag_agno,
  969. ext.rc_startblock);
  970. xfs_free_extent_later(cur->bc_tp, fsbno,
  971. ext.rc_blockcount, NULL);
  972. }
  973. skip:
  974. error = xfs_btree_increment(cur, 0, &found_rec);
  975. if (error)
  976. goto out_error;
  977. advloop:
  978. (*agbno) += ext.rc_blockcount;
  979. (*aglen) -= ext.rc_blockcount;
  980. }
  981. return error;
  982. out_error:
  983. trace_xfs_refcount_modify_extent_error(cur->bc_mp,
  984. cur->bc_ag.pag->pag_agno, error, _RET_IP_);
  985. return error;
  986. }
  987. /* Adjust the reference count of a range of AG blocks. */
  988. STATIC int
  989. xfs_refcount_adjust(
  990. struct xfs_btree_cur *cur,
  991. xfs_agblock_t agbno,
  992. xfs_extlen_t aglen,
  993. xfs_agblock_t *new_agbno,
  994. xfs_extlen_t *new_aglen,
  995. enum xfs_refc_adjust_op adj)
  996. {
  997. bool shape_changed;
  998. int shape_changes = 0;
  999. int error;
  1000. *new_agbno = agbno;
  1001. *new_aglen = aglen;
  1002. if (adj == XFS_REFCOUNT_ADJUST_INCREASE)
  1003. trace_xfs_refcount_increase(cur->bc_mp, cur->bc_ag.pag->pag_agno,
  1004. agbno, aglen);
  1005. else
  1006. trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_ag.pag->pag_agno,
  1007. agbno, aglen);
  1008. /*
  1009. * Ensure that no rcextents cross the boundary of the adjustment range.
  1010. */
  1011. error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_SHARED,
  1012. agbno, &shape_changed);
  1013. if (error)
  1014. goto out_error;
  1015. if (shape_changed)
  1016. shape_changes++;
  1017. error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_SHARED,
  1018. agbno + aglen, &shape_changed);
  1019. if (error)
  1020. goto out_error;
  1021. if (shape_changed)
  1022. shape_changes++;
  1023. /*
  1024. * Try to merge with the left or right extents of the range.
  1025. */
  1026. error = xfs_refcount_merge_extents(cur, XFS_REFC_DOMAIN_SHARED,
  1027. new_agbno, new_aglen, adj, &shape_changed);
  1028. if (error)
  1029. goto out_error;
  1030. if (shape_changed)
  1031. shape_changes++;
  1032. if (shape_changes)
  1033. cur->bc_ag.refc.shape_changes++;
  1034. /* Now that we've taken care of the ends, adjust the middle extents */
  1035. error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen, adj);
  1036. if (error)
  1037. goto out_error;
  1038. return 0;
  1039. out_error:
  1040. trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_ag.pag->pag_agno,
  1041. error, _RET_IP_);
  1042. return error;
  1043. }
  1044. /* Clean up after calling xfs_refcount_finish_one. */
  1045. void
  1046. xfs_refcount_finish_one_cleanup(
  1047. struct xfs_trans *tp,
  1048. struct xfs_btree_cur *rcur,
  1049. int error)
  1050. {
  1051. struct xfs_buf *agbp;
  1052. if (rcur == NULL)
  1053. return;
  1054. agbp = rcur->bc_ag.agbp;
  1055. xfs_btree_del_cursor(rcur, error);
  1056. if (error)
  1057. xfs_trans_brelse(tp, agbp);
  1058. }
  1059. /*
1060. * Set up a continuation of a deferred refcount operation by updating the intent.
  1061. * Checks to make sure we're not going to run off the end of the AG.
  1062. */
  1063. static inline int
  1064. xfs_refcount_continue_op(
  1065. struct xfs_btree_cur *cur,
  1066. xfs_fsblock_t startblock,
  1067. xfs_agblock_t new_agbno,
  1068. xfs_extlen_t new_len,
  1069. xfs_fsblock_t *new_fsbno)
  1070. {
  1071. struct xfs_mount *mp = cur->bc_mp;
  1072. struct xfs_perag *pag = cur->bc_ag.pag;
  1073. if (XFS_IS_CORRUPT(mp, !xfs_verify_agbext(pag, new_agbno, new_len)))
  1074. return -EFSCORRUPTED;
  1075. *new_fsbno = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno);
  1076. ASSERT(xfs_verify_fsbext(mp, *new_fsbno, new_len));
  1077. ASSERT(pag->pag_agno == XFS_FSB_TO_AGNO(mp, *new_fsbno));
  1078. return 0;
  1079. }
  1080. /*
  1081. * Process one of the deferred refcount operations. We pass back the
  1082. * btree cursor to maintain our lock on the btree between calls.
  1083. * This saves time and eliminates a buffer deadlock between the
  1084. * superblock and the AGF because we'll always grab them in the same
  1085. * order.
  1086. */
  1087. int
  1088. xfs_refcount_finish_one(
  1089. struct xfs_trans *tp,
  1090. enum xfs_refcount_intent_type type,
  1091. xfs_fsblock_t startblock,
  1092. xfs_extlen_t blockcount,
  1093. xfs_fsblock_t *new_fsb,
  1094. xfs_extlen_t *new_len,
  1095. struct xfs_btree_cur **pcur)
  1096. {
  1097. struct xfs_mount *mp = tp->t_mountp;
  1098. struct xfs_btree_cur *rcur;
  1099. struct xfs_buf *agbp = NULL;
  1100. int error = 0;
  1101. xfs_agblock_t bno;
  1102. xfs_agblock_t new_agbno;
  1103. unsigned long nr_ops = 0;
  1104. int shape_changes = 0;
  1105. struct xfs_perag *pag;
  1106. pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, startblock));
  1107. bno = XFS_FSB_TO_AGBNO(mp, startblock);
  1108. trace_xfs_refcount_deferred(mp, XFS_FSB_TO_AGNO(mp, startblock),
  1109. type, XFS_FSB_TO_AGBNO(mp, startblock),
  1110. blockcount);
  1111. if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE)) {
  1112. error = -EIO;
  1113. goto out_drop;
  1114. }
  1115. /*
  1116. * If we haven't gotten a cursor or the cursor AG doesn't match
  1117. * the startblock, get one now.
  1118. */
  1119. rcur = *pcur;
  1120. if (rcur != NULL && rcur->bc_ag.pag != pag) {
  1121. nr_ops = rcur->bc_ag.refc.nr_ops;
  1122. shape_changes = rcur->bc_ag.refc.shape_changes;
  1123. xfs_refcount_finish_one_cleanup(tp, rcur, 0);
  1124. rcur = NULL;
  1125. *pcur = NULL;
  1126. }
  1127. if (rcur == NULL) {
  1128. error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_FREEING,
  1129. &agbp);
  1130. if (error)
  1131. goto out_drop;
  1132. rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
  1133. rcur->bc_ag.refc.nr_ops = nr_ops;
  1134. rcur->bc_ag.refc.shape_changes = shape_changes;
  1135. }
  1136. *pcur = rcur;
  1137. switch (type) {
  1138. case XFS_REFCOUNT_INCREASE:
  1139. error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
  1140. new_len, XFS_REFCOUNT_ADJUST_INCREASE);
  1141. if (error)
  1142. goto out_drop;
  1143. if (*new_len > 0)
  1144. error = xfs_refcount_continue_op(rcur, startblock,
  1145. new_agbno, *new_len, new_fsb);
  1146. break;
  1147. case XFS_REFCOUNT_DECREASE:
  1148. error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
  1149. new_len, XFS_REFCOUNT_ADJUST_DECREASE);
  1150. if (error)
  1151. goto out_drop;
  1152. if (*new_len > 0)
  1153. error = xfs_refcount_continue_op(rcur, startblock,
  1154. new_agbno, *new_len, new_fsb);
  1155. break;
  1156. case XFS_REFCOUNT_ALLOC_COW:
  1157. *new_fsb = startblock + blockcount;
  1158. *new_len = 0;
  1159. error = __xfs_refcount_cow_alloc(rcur, bno, blockcount);
  1160. break;
  1161. case XFS_REFCOUNT_FREE_COW:
  1162. *new_fsb = startblock + blockcount;
  1163. *new_len = 0;
  1164. error = __xfs_refcount_cow_free(rcur, bno, blockcount);
  1165. break;
  1166. default:
  1167. ASSERT(0);
  1168. error = -EFSCORRUPTED;
  1169. }
  1170. if (!error && *new_len > 0)
  1171. trace_xfs_refcount_finish_one_leftover(mp, pag->pag_agno, type,
  1172. bno, blockcount, new_agbno, *new_len);
  1173. out_drop:
  1174. xfs_perag_put(pag);
  1175. return error;
  1176. }
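/*
 * Hedged usage sketch (not the actual deferred-op item code): a caller
 * processing one intent reuses the cached cursor and requeues any leftover
 * work, roughly:
 *
 *	struct xfs_btree_cur	*rcur = NULL;
 *	xfs_fsblock_t		new_fsb;
 *	xfs_extlen_t		new_len;
 *
 *	error = xfs_refcount_finish_one(tp, XFS_REFCOUNT_INCREASE,
 *			startblock, blockcount, &new_fsb, &new_len, &rcur);
 *	// if new_len > 0, the transaction reservation ran low; the caller
 *	// relogs an intent for [new_fsb, new_len) and continues after a roll
 *	xfs_refcount_finish_one_cleanup(tp, rcur, error);
 */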
  1177. /*
  1178. * Record a refcount intent for later processing.
  1179. */
  1180. static void
  1181. __xfs_refcount_add(
  1182. struct xfs_trans *tp,
  1183. enum xfs_refcount_intent_type type,
  1184. xfs_fsblock_t startblock,
  1185. xfs_extlen_t blockcount)
  1186. {
  1187. struct xfs_refcount_intent *ri;
  1188. trace_xfs_refcount_defer(tp->t_mountp,
  1189. XFS_FSB_TO_AGNO(tp->t_mountp, startblock),
  1190. type, XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
  1191. blockcount);
  1192. ri = kmem_cache_alloc(xfs_refcount_intent_cache,
  1193. GFP_NOFS | __GFP_NOFAIL);
  1194. INIT_LIST_HEAD(&ri->ri_list);
  1195. ri->ri_type = type;
  1196. ri->ri_startblock = startblock;
  1197. ri->ri_blockcount = blockcount;
  1198. xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
  1199. }
  1200. /*
  1201. * Increase the reference count of the blocks backing a file's extent.
  1202. */
  1203. void
  1204. xfs_refcount_increase_extent(
  1205. struct xfs_trans *tp,
  1206. struct xfs_bmbt_irec *PREV)
  1207. {
  1208. if (!xfs_has_reflink(tp->t_mountp))
  1209. return;
  1210. __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE, PREV->br_startblock,
  1211. PREV->br_blockcount);
  1212. }
  1213. /*
  1214. * Decrease the reference count of the blocks backing a file's extent.
  1215. */
  1216. void
  1217. xfs_refcount_decrease_extent(
  1218. struct xfs_trans *tp,
  1219. struct xfs_bmbt_irec *PREV)
  1220. {
  1221. if (!xfs_has_reflink(tp->t_mountp))
  1222. return;
  1223. __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE, PREV->br_startblock,
  1224. PREV->br_blockcount);
  1225. }
  1226. /*
  1227. * Given an AG extent, find the lowest-numbered run of shared blocks
  1228. * within that range and return the range in fbno/flen. If
  1229. * find_end_of_shared is set, return the longest contiguous extent of
  1230. * shared blocks; if not, just return the first extent we find. If no
  1231. * shared blocks are found, fbno and flen will be set to NULLAGBLOCK
  1232. * and 0, respectively.
  1233. */
  1234. int
  1235. xfs_refcount_find_shared(
  1236. struct xfs_btree_cur *cur,
  1237. xfs_agblock_t agbno,
  1238. xfs_extlen_t aglen,
  1239. xfs_agblock_t *fbno,
  1240. xfs_extlen_t *flen,
  1241. bool find_end_of_shared)
  1242. {
  1243. struct xfs_refcount_irec tmp;
  1244. int i;
  1245. int have;
  1246. int error;
  1247. trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_ag.pag->pag_agno,
  1248. agbno, aglen);
  1249. /* By default, skip the whole range */
  1250. *fbno = NULLAGBLOCK;
  1251. *flen = 0;
  1252. /* Try to find a refcount extent that crosses the start */
  1253. error = xfs_refcount_lookup_le(cur, XFS_REFC_DOMAIN_SHARED, agbno,
  1254. &have);
  1255. if (error)
  1256. goto out_error;
  1257. if (!have) {
  1258. /* No left extent, look at the next one */
  1259. error = xfs_btree_increment(cur, 0, &have);
  1260. if (error)
  1261. goto out_error;
  1262. if (!have)
  1263. goto done;
  1264. }
  1265. error = xfs_refcount_get_rec(cur, &tmp, &i);
  1266. if (error)
  1267. goto out_error;
  1268. if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
  1269. error = -EFSCORRUPTED;
  1270. goto out_error;
  1271. }
  1272. if (tmp.rc_domain != XFS_REFC_DOMAIN_SHARED)
  1273. goto done;
  1274. /* If the extent ends before the start, look at the next one */
  1275. if (tmp.rc_startblock + tmp.rc_blockcount <= agbno) {
  1276. error = xfs_btree_increment(cur, 0, &have);
  1277. if (error)
  1278. goto out_error;
  1279. if (!have)
  1280. goto done;
  1281. error = xfs_refcount_get_rec(cur, &tmp, &i);
  1282. if (error)
  1283. goto out_error;
  1284. if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
  1285. error = -EFSCORRUPTED;
  1286. goto out_error;
  1287. }
  1288. if (tmp.rc_domain != XFS_REFC_DOMAIN_SHARED)
  1289. goto done;
  1290. }
  1291. /* If the extent starts after the range we want, bail out */
  1292. if (tmp.rc_startblock >= agbno + aglen)
  1293. goto done;
  1294. /* We found the start of a shared extent! */
  1295. if (tmp.rc_startblock < agbno) {
  1296. tmp.rc_blockcount -= (agbno - tmp.rc_startblock);
  1297. tmp.rc_startblock = agbno;
  1298. }
  1299. *fbno = tmp.rc_startblock;
  1300. *flen = min(tmp.rc_blockcount, agbno + aglen - *fbno);
  1301. if (!find_end_of_shared)
  1302. goto done;
  1303. /* Otherwise, find the end of this shared extent */
  1304. while (*fbno + *flen < agbno + aglen) {
  1305. error = xfs_btree_increment(cur, 0, &have);
  1306. if (error)
  1307. goto out_error;
  1308. if (!have)
  1309. break;
  1310. error = xfs_refcount_get_rec(cur, &tmp, &i);
  1311. if (error)
  1312. goto out_error;
  1313. if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
  1314. error = -EFSCORRUPTED;
  1315. goto out_error;
  1316. }
  1317. if (tmp.rc_domain != XFS_REFC_DOMAIN_SHARED ||
  1318. tmp.rc_startblock >= agbno + aglen ||
  1319. tmp.rc_startblock != *fbno + *flen)
  1320. break;
  1321. *flen = min(*flen + tmp.rc_blockcount, agbno + aglen - *fbno);
  1322. }
  1323. done:
  1324. trace_xfs_refcount_find_shared_result(cur->bc_mp,
  1325. cur->bc_ag.pag->pag_agno, *fbno, *flen);
  1326. out_error:
  1327. if (error)
  1328. trace_xfs_refcount_find_shared_error(cur->bc_mp,
  1329. cur->bc_ag.pag->pag_agno, error, _RET_IP_);
  1330. return error;
  1331. }
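/*
 * Illustrative example (the real callers live in the reflink code): to ask
 * whether any part of an AG extent is shared, and where the first shared run
 * starts:
 *
 *	xfs_agblock_t	fbno;
 *	xfs_extlen_t	flen;
 *
 *	error = xfs_refcount_find_shared(cur, agbno, aglen,
 *			&fbno, &flen, false);
 *	if (!error && fbno != NULLAGBLOCK)
 *		// [fbno, fbno + flen) is the first shared sub-extent
 */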
  1332. /*
  1333. * Recovering CoW Blocks After a Crash
  1334. *
  1335. * Due to the way that the copy on write mechanism works, there's a window of
  1336. * opportunity in which we can lose track of allocated blocks during a crash.
  1337. * Because CoW uses delayed allocation in the in-core CoW fork, writeback
  1338. * causes blocks to be allocated and stored in the CoW fork. The blocks are
  1339. * no longer in the free space btree but are not otherwise recorded anywhere
  1340. * until the write completes and the blocks are mapped into the file. A crash
  1341. * in between allocation and remapping results in the replacement blocks being
  1342. * lost. This situation is exacerbated by the CoW extent size hint because
1343. * allocations can hang around for a long time.
  1344. *
  1345. * However, there is a place where we can record these allocations before they
  1346. * become mappings -- the reference count btree. The btree does not record
  1347. * extents with refcount == 1, so we can record allocations with a refcount of
  1348. * 1. Blocks being used for CoW writeout cannot be shared, so there should be
  1349. * no conflict with shared block records. These mappings should be created
  1350. * when we allocate blocks to the CoW fork and deleted when they're removed
  1351. * from the CoW fork.
  1352. *
  1353. * Minor nit: records for in-progress CoW allocations and records for shared
  1354. * extents must never be merged, to preserve the property that (except for CoW
  1355. * allocations) there are no refcount btree entries with refcount == 1. The
  1356. * only time this could potentially happen is when unsharing a block that's
  1357. * adjacent to CoW allocations, so we must be careful to avoid this.
  1358. *
  1359. * At mount time we recover lost CoW allocations by searching the refcount
  1360. * btree for these refcount == 1 mappings. These represent CoW allocations
  1361. * that were in progress at the time the filesystem went down, so we can free
  1362. * them to get the space back.
  1363. *
  1364. * This mechanism is superior to creating EFIs for unmapped CoW extents for
  1365. * several reasons -- first, EFIs pin the tail of the log and would have to be
  1366. * periodically relogged to avoid filling up the log. Second, CoW completions
  1367. * will have to file an EFD and create new EFIs for whatever remains in the
  1368. * CoW fork; this partially takes care of (1) but extent-size reservations
  1369. * will have to periodically relog even if there's no writeout in progress.
  1370. * This can happen if the CoW extent size hint is set, which you really want.
  1371. * Third, EFIs cannot currently be automatically relogged into newer
  1372. * transactions to advance the log tail. Fourth, stuffing the log full of
  1373. * EFIs places an upper bound on the number of CoW allocations that can be
  1374. * held filesystem-wide at any given time. Recording them in the refcount
  1375. * btree doesn't require us to maintain any state in memory and doesn't pin
  1376. * the log.
  1377. */
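/*
 * Lifecycle sketch of a CoW staging record (illustrative only, using the
 * helpers defined later in this file):
 *
 *	// delalloc CoW blocks are allocated during writeback:
 *	xfs_refcount_alloc_cow_extent(tp, fsb, len);
 *		-> defers XFS_REFCOUNT_ALLOC_COW, handled by
 *		   __xfs_refcount_cow_alloc() below
 *
 *	// remap completion or CoW cancellation:
 *	xfs_refcount_free_cow_extent(tp, fsb, len);
 *		-> defers XFS_REFCOUNT_FREE_COW, handled by
 *		   __xfs_refcount_cow_free() below
 *
 * Anything still recorded in the CoW domain at mount time is freed by the
 * leftover-recovery path described above.
 */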
/*
 * Adjust the refcounts of CoW allocations.  These allocations are "magic"
 * in that they're not referenced anywhere else in the filesystem, so we
 * stash them in the refcount btree with a refcount of 1 until either file
 * remapping (or CoW cancellation) happens.
 */
STATIC int
xfs_refcount_adjust_cow_extents(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	enum xfs_refc_adjust_op	adj)
{
	struct xfs_refcount_irec	ext, tmp;
	int				error;
	int				found_rec, found_tmp;

	if (aglen == 0)
		return 0;

	/* Find any overlapping refcount records */
	error = xfs_refcount_lookup_ge(cur, XFS_REFC_DOMAIN_COW, agbno,
			&found_rec);
	if (error)
		goto out_error;
	error = xfs_refcount_get_rec(cur, &ext, &found_rec);
	if (error)
		goto out_error;
	if (XFS_IS_CORRUPT(cur->bc_mp, found_rec &&
				ext.rc_domain != XFS_REFC_DOMAIN_COW)) {
		error = -EFSCORRUPTED;
		goto out_error;
	}
	if (!found_rec) {
		ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
		ext.rc_blockcount = 0;
		ext.rc_refcount = 0;
		ext.rc_domain = XFS_REFC_DOMAIN_COW;
	}

	switch (adj) {
	case XFS_REFCOUNT_ADJUST_COW_ALLOC:
		/* Adding a CoW reservation, there should be nothing here. */
		if (XFS_IS_CORRUPT(cur->bc_mp,
				   agbno + aglen > ext.rc_startblock)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}

		tmp.rc_startblock = agbno;
		tmp.rc_blockcount = aglen;
		tmp.rc_refcount = 1;
		tmp.rc_domain = XFS_REFC_DOMAIN_COW;

		trace_xfs_refcount_modify_extent(cur->bc_mp,
				cur->bc_ag.pag->pag_agno, &tmp);
		error = xfs_refcount_insert(cur, &tmp,
				&found_tmp);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_tmp != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}
		break;
	case XFS_REFCOUNT_ADJUST_COW_FREE:
		/* Removing a CoW reservation, there should be one extent. */
		if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_startblock != agbno)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}
		if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount != aglen)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}
		if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_refcount != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}

		ext.rc_refcount = 0;
		trace_xfs_refcount_modify_extent(cur->bc_mp,
				cur->bc_ag.pag->pag_agno, &ext);
		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
			error = -EFSCORRUPTED;
			goto out_error;
		}
		break;
	default:
		ASSERT(0);
	}

	return error;
out_error:
	trace_xfs_refcount_modify_extent_error(cur->bc_mp,
			cur->bc_ag.pag->pag_agno, error, _RET_IP_);
	return error;
}

/*
 * Add or remove refcount btree entries for CoW reservations.
 */
STATIC int
xfs_refcount_adjust_cow(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	enum xfs_refc_adjust_op	adj)
{
	bool			shape_changed;
	int			error;

	/*
	 * Ensure that no rcextents cross the boundary of the adjustment range.
	 */
	error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_COW,
			agbno, &shape_changed);
	if (error)
		goto out_error;

	error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_COW,
			agbno + aglen, &shape_changed);
	if (error)
		goto out_error;

	/*
	 * Try to merge with the left or right extents of the range.
	 */
	error = xfs_refcount_merge_extents(cur, XFS_REFC_DOMAIN_COW, &agbno,
			&aglen, adj, &shape_changed);
	if (error)
		goto out_error;

	/* Now that we've taken care of the ends, adjust the middle extents */
	error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj);
	if (error)
		goto out_error;

	return 0;

out_error:
	trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_ag.pag->pag_agno,
			error, _RET_IP_);
	return error;
}

/*
 * Record a CoW allocation in the refcount btree.
 */
STATIC int
__xfs_refcount_cow_alloc(
	struct xfs_btree_cur	*rcur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen)
{
	trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_ag.pag->pag_agno,
			agbno, aglen);

	/* Add refcount btree reservation */
	return xfs_refcount_adjust_cow(rcur, agbno, aglen,
			XFS_REFCOUNT_ADJUST_COW_ALLOC);
}

/*
 * Remove a CoW allocation from the refcount btree.
 */
STATIC int
__xfs_refcount_cow_free(
	struct xfs_btree_cur	*rcur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen)
{
	trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_ag.pag->pag_agno,
			agbno, aglen);

	/* Remove refcount btree reservation */
	return xfs_refcount_adjust_cow(rcur, agbno, aglen,
			XFS_REFCOUNT_ADJUST_COW_FREE);
}

/* Record a CoW staging extent in the refcount btree. */
void
xfs_refcount_alloc_cow_extent(
	struct xfs_trans	*tp,
	xfs_fsblock_t		fsb,
	xfs_extlen_t		len)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!xfs_has_reflink(mp))
		return;

	__xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, fsb, len);

	/* Add rmap entry */
	xfs_rmap_alloc_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
			XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
}

/* Forget a CoW staging event in the refcount btree. */
void
xfs_refcount_free_cow_extent(
	struct xfs_trans	*tp,
	xfs_fsblock_t		fsb,
	xfs_extlen_t		len)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!xfs_has_reflink(mp))
		return;

	/* Remove rmap entry */
	xfs_rmap_free_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
			XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
	__xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, fsb, len);
}

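/*
 * A minimal sketch of how the two helpers above pair up over the life of a
 * CoW reservation.  The variable names are assumptions, and each call would
 * be made under whatever transaction is live at that point, not necessarily
 * the same one:
 *
 *	xfs_refcount_alloc_cow_extent(tp, fsb, len);
 *		...the CoW write runs and the blocks are remapped into the
 *		   data fork, or the reservation is cancelled...
 *	xfs_refcount_free_cow_extent(tp, fsb, len);
 *
 * If the filesystem goes down between the two calls, the staging record left
 * in the refcount btree is what mount-time recovery (below) cleans up.
 */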
struct xfs_refcount_recovery {
	struct list_head		rr_list;
	struct xfs_refcount_irec	rr_rrec;
};

/* Stuff an extent on the recovery list. */
STATIC int
xfs_refcount_recover_extent(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{
	struct list_head		*debris = priv;
	struct xfs_refcount_recovery	*rr;

	if (XFS_IS_CORRUPT(cur->bc_mp,
			   be32_to_cpu(rec->refc.rc_refcount) != 1))
		return -EFSCORRUPTED;

	rr = kmalloc(sizeof(struct xfs_refcount_recovery),
			GFP_KERNEL | __GFP_NOFAIL);
	INIT_LIST_HEAD(&rr->rr_list);
	xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);

	if (XFS_IS_CORRUPT(cur->bc_mp,
			   rr->rr_rrec.rc_domain != XFS_REFC_DOMAIN_COW)) {
		kfree(rr);
		return -EFSCORRUPTED;
	}

	list_add_tail(&rr->rr_list, debris);
	return 0;
}

/* Find and remove leftover CoW reservations. */
int
xfs_refcount_recover_cow_leftovers(
	struct xfs_mount		*mp,
	struct xfs_perag		*pag)
{
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agbp;
	struct xfs_refcount_recovery	*rr, *n;
	struct list_head		debris;
	union xfs_btree_irec		low;
	union xfs_btree_irec		high;
	xfs_fsblock_t			fsb;
	int				error;

	/* reflink filesystems mustn't have AGs larger than 2^31-1 blocks */
	BUILD_BUG_ON(XFS_MAX_CRC_AG_BLOCKS >= XFS_REFC_COWFLAG);
	if (mp->m_sb.sb_agblocks > XFS_MAX_CRC_AG_BLOCKS)
		return -EOPNOTSUPP;

	INIT_LIST_HEAD(&debris);

	/*
	 * In this first part, we use an empty transaction to gather up
	 * all the leftover CoW extents so that we can subsequently
	 * delete them.  The empty transaction is used to avoid
	 * a buffer lock deadlock if there happens to be a loop in the
	 * refcountbt because we're allowed to re-grab a buffer that is
	 * already attached to our transaction.  When we're done
	 * recording the CoW debris we cancel the (empty) transaction
	 * and everything goes away cleanly.
	 */
	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
	if (error)
		goto out_trans;

	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);

	/* Find all the leftover CoW staging extents. */
	memset(&low, 0, sizeof(low));
	memset(&high, 0, sizeof(high));
	low.rc.rc_domain = high.rc.rc_domain = XFS_REFC_DOMAIN_COW;
	high.rc.rc_startblock = -1U;
	error = xfs_btree_query_range(cur, &low, &high,
			xfs_refcount_recover_extent, &debris);
	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(tp, agbp);
	xfs_trans_cancel(tp);
	if (error)
		goto out_free;

	/* Now iterate the list to free the leftovers */
	list_for_each_entry_safe(rr, n, &debris, rr_list) {
		/* Set up transaction. */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
		if (error)
			goto out_free;

		trace_xfs_refcount_recover_extent(mp, pag->pag_agno,
				&rr->rr_rrec);

		/* Free the orphan record */
		fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno,
				rr->rr_rrec.rc_startblock);
		xfs_refcount_free_cow_extent(tp, fsb,
				rr->rr_rrec.rc_blockcount);

		/* Free the block. */
		xfs_free_extent_later(tp, fsb, rr->rr_rrec.rc_blockcount, NULL);

		error = xfs_trans_commit(tp);
		if (error)
			goto out_free;

		list_del(&rr->rr_list);
		kfree(rr);
	}

	return error;
out_trans:
	xfs_trans_cancel(tp);
out_free:
	/* Free the leftover list */
	list_for_each_entry_safe(rr, n, &debris, rr_list) {
		list_del(&rr->rr_list);
		kfree(rr);
	}
	return error;
}

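/*
 * A minimal sketch of how this recovery might be driven once per AG at mount
 * time, assuming a for_each_perag()-style walk.  The loop shape, the variable
 * names, and the handling of the perag reference on early exit are
 * assumptions here; the real caller lives in the reflink code, not in this
 * file:
 *
 *	for_each_perag(mp, agno, pag) {
 *		error = xfs_refcount_recover_cow_leftovers(mp, pag);
 *		if (error)
 *			break;
 *	}
 */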
/* Is there a record covering a given extent? */
int
xfs_refcount_has_record(
	struct xfs_btree_cur	*cur,
	enum xfs_refc_domain	domain,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*exists)
{
	union xfs_btree_irec	low;
	union xfs_btree_irec	high;

	memset(&low, 0, sizeof(low));
	low.rc.rc_startblock = bno;
	memset(&high, 0xFF, sizeof(high));
	high.rc.rc_startblock = bno + len - 1;
	low.rc.rc_domain = high.rc.rc_domain = domain;

	return xfs_btree_has_record(cur, &low, &high, exists);
}

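/*
 * A minimal sketch of how a caller might use the query above to test whether
 * any CoW staging record overlaps an AG extent.  The cursor "cur" and the
 * agbno/aglen values are assumed to come from the caller's own context:
 *
 *	bool	exists;
 *
 *	error = xfs_refcount_has_record(cur, XFS_REFC_DOMAIN_COW,
 *			agbno, aglen, &exists);
 *	if (!error && exists)
 *		(some part of [agbno, agbno + aglen) is covered by a record)
 */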
int __init
xfs_refcount_intent_init_cache(void)
{
	xfs_refcount_intent_cache = kmem_cache_create("xfs_refc_intent",
			sizeof(struct xfs_refcount_intent),
			0, 0, NULL);

	return xfs_refcount_intent_cache != NULL ? 0 : -ENOMEM;
}

void
xfs_refcount_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_refcount_intent_cache);
	xfs_refcount_intent_cache = NULL;
}