xfs_extent_busy.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_ag.h"

void
xfs_extent_busy_insert(
        struct xfs_trans *tp,
        struct xfs_perag *pag,
        xfs_agblock_t bno,
        xfs_extlen_t len,
        unsigned int flags)
{
        struct xfs_extent_busy *new;
        struct xfs_extent_busy *busyp;
        struct rb_node **rbp;
        struct rb_node *parent = NULL;

        new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
        new->agno = pag->pag_agno;
        new->bno = bno;
        new->length = len;
        INIT_LIST_HEAD(&new->list);
        new->flags = flags;

        /* trace before insert to be able to see failed inserts */
        trace_xfs_extent_busy(tp->t_mountp, pag->pag_agno, bno, len);

        spin_lock(&pag->pagb_lock);
        rbp = &pag->pagb_tree.rb_node;
        while (*rbp) {
                parent = *rbp;
                busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

                if (new->bno < busyp->bno) {
                        rbp = &(*rbp)->rb_left;
                        ASSERT(new->bno + new->length <= busyp->bno);
                } else if (new->bno > busyp->bno) {
                        rbp = &(*rbp)->rb_right;
                        ASSERT(bno >= busyp->bno + busyp->length);
                } else {
                        ASSERT(0);
                }
        }

        rb_link_node(&new->rb_node, parent, rbp);
        rb_insert_color(&new->rb_node, &pag->pagb_tree);

        list_add(&new->list, &tp->t_busy);
        spin_unlock(&pag->pagb_lock);
}

/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  You need to be holding the busy extent tree lock when calling
 * xfs_extent_busy_search().  This function returns 0 for no overlapping busy
 * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
 * match.  This is done so that a non-zero return indicates an overlap that
 * will require a synchronous transaction, while still allowing the caller to
 * distinguish between a partial and an exact match.
 */
int
xfs_extent_busy_search(
        struct xfs_mount *mp,
        struct xfs_perag *pag,
        xfs_agblock_t bno,
        xfs_extlen_t len)
{
        struct rb_node *rbp;
        struct xfs_extent_busy *busyp;
        int match = 0;

        /* find closest start bno overlap */
        spin_lock(&pag->pagb_lock);
        rbp = pag->pagb_tree.rb_node;
        while (rbp) {
                busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
                if (bno < busyp->bno) {
                        /* may overlap, but exact start block is lower */
                        if (bno + len > busyp->bno)
                                match = -1;
                        rbp = rbp->rb_left;
                } else if (bno > busyp->bno) {
                        /* may overlap, but exact start block is higher */
                        if (bno < busyp->bno + busyp->length)
                                match = -1;
                        rbp = rbp->rb_right;
                } else {
                        /* bno matches busyp, length determines exact match */
                        match = (busyp->length == len) ? 1 : -1;
                        break;
                }
        }
        spin_unlock(&pag->pagb_lock);
        return match;
}

/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent.  If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation.  We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
        struct xfs_mount *mp,
        struct xfs_perag *pag,
        struct xfs_extent_busy *busyp,
        xfs_agblock_t fbno,
        xfs_extlen_t flen,
        bool userdata) __releases(&pag->pagb_lock)
                       __acquires(&pag->pagb_lock)
{
        xfs_agblock_t fend = fbno + flen;
        xfs_agblock_t bbno = busyp->bno;
        xfs_agblock_t bend = bbno + busyp->length;

        /*
         * This extent is currently being discarded.  Give the thread
         * performing the discard a chance to mark the extent unbusy
         * and retry.
         */
        if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
                spin_unlock(&pag->pagb_lock);
                delay(1);
                spin_lock(&pag->pagb_lock);
                return false;
        }

        /*
         * If there is a busy extent overlapping a user allocation, we have
         * no choice but to force the log and retry the search.
         *
         * Fortunately this does not happen during normal operation, but
         * only if the filesystem is very low on space and has to dip into
         * the AGFL for normal allocations.
         */
        if (userdata)
                goto out_force_log;

        if (bbno < fbno && bend > fend) {
                /*
                 * Case 1:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *        +---------+
                 *        fbno   fend
                 */

                /*
                 * We would have to split the busy extent to be able to track
                 * it correctly, which we cannot do because we would have to
                 * modify the list of busy extents attached to the transaction
                 * or CIL context, which is immutable.
                 *
                 * Force out the log to clear the busy extent and retry the
                 * search.
                 */
                goto out_force_log;
        } else if (bbno >= fbno && bend <= fend) {
                /*
                 * Case 2:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *    +-----------------+
                 *    fbno           fend
                 *
                 * Case 3:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *    +--------------------------+
                 *    fbno                    fend
                 *
                 * Case 4:
                 *             bbno           bend
                 *             +BBBBBBBBBBBBBBBBB+
                 *    +--------------------------+
                 *    fbno                    fend
                 *
                 * Case 5:
                 *             bbno           bend
                 *             +BBBBBBBBBBBBBBBBB+
                 *    +-----------------------------------+
                 *    fbno                             fend
                 */

                /*
                 * The busy extent is fully covered by the extent we are
                 * allocating, and can simply be removed from the rbtree.
                 * However we cannot remove it from the immutable list
                 * tracking busy extents in the transaction or CIL context,
                 * so set the length to zero to mark it invalid.
                 *
                 * We also need to restart the busy extent search from the
                 * tree root, because erasing the node can rearrange the
                 * tree topology.
                 */
                rb_erase(&busyp->rb_node, &pag->pagb_tree);
                busyp->length = 0;
                return false;
        } else if (fend < bend) {
                /*
                 * Case 6:
                 *              bbno           bend
                 *              +BBBBBBBBBBBBBBBBB+
                 *              +---------+
                 *              fbno   fend
                 *
                 * Case 7:
                 *             bbno           bend
                 *             +BBBBBBBBBBBBBBBBB+
                 *    +------------------+
                 *    fbno           fend
                 */
                busyp->bno = fend;
        } else if (bbno < fbno) {
                /*
                 * Case 8:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *        +-------------+
                 *        fbno       fend
                 *
                 * Case 9:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *        +----------------------+
                 *        fbno                fend
                 */
                busyp->length = fbno - busyp->bno;
        } else {
                ASSERT(0);
        }

        trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
        return true;

out_force_log:
        spin_unlock(&pag->pagb_lock);
        xfs_log_force(mp, XFS_LOG_SYNC);
        trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
        spin_lock(&pag->pagb_lock);
        return false;
}

/*
 * For a given extent [fbno, flen], make sure we can reuse it safely.
 */
void
xfs_extent_busy_reuse(
        struct xfs_mount *mp,
        struct xfs_perag *pag,
        xfs_agblock_t fbno,
        xfs_extlen_t flen,
        bool userdata)
{
        struct rb_node *rbp;

        ASSERT(flen > 0);
        spin_lock(&pag->pagb_lock);
restart:
        rbp = pag->pagb_tree.rb_node;
        while (rbp) {
                struct xfs_extent_busy *busyp =
                        rb_entry(rbp, struct xfs_extent_busy, rb_node);
                xfs_agblock_t bbno = busyp->bno;
                xfs_agblock_t bend = bbno + busyp->length;

                if (fbno + flen <= bbno) {
                        rbp = rbp->rb_left;
                        continue;
                } else if (fbno >= bend) {
                        rbp = rbp->rb_right;
                        continue;
                }

                if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
                                                   userdata))
                        goto restart;
        }
        spin_unlock(&pag->pagb_lock);
}

/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy.  If *len is smaller than
 * args->minlen no suitable extent could be found, and the higher level
 * code needs to force out the log and retry the allocation.
 *
 * Return the current busy generation for the AG if the extent is busy.  This
 * value can be used to wait for at least one of the currently busy extents
 * to be cleared.  Note that the busy list is not guaranteed to be empty after
 * a wakeup for that generation; the state of a specific extent must always be
 * confirmed with another call to xfs_extent_busy_trim() before it can be
 * used.
 */
bool
xfs_extent_busy_trim(
        struct xfs_alloc_arg *args,
        xfs_agblock_t *bno,
        xfs_extlen_t *len,
        unsigned *busy_gen)
{
        xfs_agblock_t fbno;
        xfs_extlen_t flen;
        struct rb_node *rbp;
        bool ret = false;

        ASSERT(*len > 0);

        spin_lock(&args->pag->pagb_lock);
        fbno = *bno;
        flen = *len;
        rbp = args->pag->pagb_tree.rb_node;
        while (rbp && flen >= args->minlen) {
                struct xfs_extent_busy *busyp =
                        rb_entry(rbp, struct xfs_extent_busy, rb_node);
                xfs_agblock_t fend = fbno + flen;
                xfs_agblock_t bbno = busyp->bno;
                xfs_agblock_t bend = bbno + busyp->length;

                if (fend <= bbno) {
                        rbp = rbp->rb_left;
                        continue;
                } else if (fbno >= bend) {
                        rbp = rbp->rb_right;
                        continue;
                }

                if (bbno <= fbno) {
                        /* start overlap */

                        /*
                         * Case 1:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *        +---------+
                         *        fbno   fend
                         *
                         * Case 2:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *    +-------------+
                         *    fbno       fend
                         *
                         * Case 3:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *        +-------------+
                         *        fbno       fend
                         *
                         * Case 4:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *    +-----------------+
                         *    fbno           fend
                         *
                         * No unbusy region in extent, return failure.
                         */
                        if (fend <= bend)
                                goto fail;

                        /*
                         * Case 5:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *        +----------------------+
                         *        fbno                fend
                         *
                         * Case 6:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *    +--------------------------+
                         *    fbno                    fend
                         *
                         * Needs to be trimmed to:
                         *                       +-------+
                         *                       fbno fend
                         */
                        fbno = bend;
                } else if (bend >= fend) {
                        /* end overlap */

                        /*
                         * Case 7:
                         *             bbno           bend
                         *             +BBBBBBBBBBBBBBBBB+
                         *    +------------------+
                         *    fbno           fend
                         *
                         * Case 8:
                         *             bbno           bend
                         *             +BBBBBBBBBBBBBBBBB+
                         *    +--------------------------+
                         *    fbno                    fend
                         *
                         * Needs to be trimmed to:
                         *    +-------+
                         *    fbno fend
                         */
                        fend = bbno;
                } else {
                        /* middle overlap */

                        /*
                         * Case 9:
                         *             bbno           bend
                         *             +BBBBBBBBBBBBBBBBB+
                         *    +-----------------------------------+
                         *    fbno                             fend
                         *
                         * Can be trimmed to:
                         *    +-------+        OR         +-------+
                         *    fbno fend                    fbno fend
                         *
                         * Backward allocation leads to significant
                         * fragmentation of directories, which degrades
                         * directory performance, therefore we always want to
                         * choose the option that produces forward allocation
                         * patterns.
                         *
                         * Preferring the lower bno extent will make the next
                         * request use "fend" as the start of the next
                         * allocation; if the segment is no longer busy at
                         * that point, we'll get a contiguous allocation, but
                         * even if it is still busy, we will get a forward
                         * allocation.
                         *
                         * We try to avoid choosing the segment at "bend",
                         * because that can lead to the next allocation
                         * taking the segment at "fbno", which would be a
                         * backward allocation.  We only use the segment at
                         * "fbno" if it is much larger than the current
                         * requested size, because in that case there's a
                         * good chance subsequent allocations will be
                         * contiguous.
                         */
                        if (bbno - fbno >= args->maxlen) {
                                /* left candidate fits perfect */
                                fend = bbno;
                        } else if (fend - bend >= args->maxlen * 4) {
                                /* right candidate has enough free space */
                                fbno = bend;
                        } else if (bbno - fbno >= args->minlen) {
                                /* left candidate fits minimum requirement */
                                fend = bbno;
                        } else {
                                goto fail;
                        }
                }

                flen = fend - fbno;
        }
out:
        if (fbno != *bno || flen != *len) {
                trace_xfs_extent_busy_trim(args->mp, args->agno, *bno, *len,
                                           fbno, flen);
                *bno = fbno;
                *len = flen;
                *busy_gen = args->pag->pagb_gen;
                ret = true;
        }
        spin_unlock(&args->pag->pagb_lock);
        return ret;
fail:
        /*
         * Return a zero extent length as a failure indication.  All callers
         * re-check if the trimmed extent satisfies the minlen requirement.
         */
        flen = 0;
        goto out;
}

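/*
 * Drop a single busy extent record: remove it from the per-AG rbtree if it
 * still has a length (entries invalidated by xfs_extent_busy_update_extent()
 * have a zero length and were already erased), then unlink it from the busy
 * list and free it.
 */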
STATIC void
xfs_extent_busy_clear_one(
        struct xfs_mount *mp,
        struct xfs_perag *pag,
        struct xfs_extent_busy *busyp)
{
        if (busyp->length) {
                trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
                                            busyp->length);
                rb_erase(&busyp->rb_node, &pag->pagb_tree);
        }

        list_del_init(&busyp->list);
        kmem_free(busyp);
}

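/*
 * Drop the reference on a per-AG structure after processing its busy
 * extents.  If anything was cleared, bump the busy generation and wake
 * waiters in xfs_extent_busy_flush() and xfs_extent_busy_wait_all().
 */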
static void
xfs_extent_busy_put_pag(
        struct xfs_perag *pag,
        bool wakeup)
                __releases(pag->pagb_lock)
{
        if (wakeup) {
                pag->pagb_gen++;
                wake_up_all(&pag->pagb_wait);
        }

        spin_unlock(&pag->pagb_lock);
        xfs_perag_put(pag);
}

/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set, skip extents that need to be discarded and mark
 * these as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
        struct xfs_mount *mp,
        struct list_head *list,
        bool do_discard)
{
        struct xfs_extent_busy *busyp, *n;
        struct xfs_perag *pag = NULL;
        xfs_agnumber_t agno = NULLAGNUMBER;
        bool wakeup = false;

        list_for_each_entry_safe(busyp, n, list, list) {
                if (busyp->agno != agno) {
                        if (pag)
                                xfs_extent_busy_put_pag(pag, wakeup);
                        agno = busyp->agno;
                        pag = xfs_perag_get(mp, agno);
                        spin_lock(&pag->pagb_lock);
                        wakeup = false;
                }

                if (do_discard && busyp->length &&
                    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
                        busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
                } else {
                        xfs_extent_busy_clear_one(mp, pag, busyp);
                        wakeup = true;
                }
        }

        if (pag)
                xfs_extent_busy_put_pag(pag, wakeup);
}

/*
 * Flush out all busy extents for this AG.  Force the log to disk, then wait
 * until the busy generation has changed from the one passed in, i.e. until
 * at least one busy extent has been cleared since the caller sampled it.
 */
void
xfs_extent_busy_flush(
        struct xfs_mount *mp,
        struct xfs_perag *pag,
        unsigned busy_gen)
{
        DEFINE_WAIT(wait);
        int error;

        error = xfs_log_force(mp, XFS_LOG_SYNC);
        if (error)
                return;

        do {
                prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
                if (busy_gen != READ_ONCE(pag->pagb_gen))
                        break;
                schedule();
        } while (1);

        finish_wait(&pag->pagb_wait, &wait);
}

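/*
 * Wait until there are no busy extents left in any AG on this mount.  For
 * each AG this sleeps on the pagb_wait queue until the AG's busy extent
 * tree is empty.
 */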
void
xfs_extent_busy_wait_all(
        struct xfs_mount *mp)
{
        struct xfs_perag *pag;
        DEFINE_WAIT(wait);
        xfs_agnumber_t agno;

        for_each_perag(mp, agno, pag) {
                do {
                        prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
                        if (RB_EMPTY_ROOT(&pag->pagb_tree))
                                break;
                        schedule();
                } while (1);
                finish_wait(&pag->pagb_wait, &wait);
        }
}

/*
 * Callback for list_sort to sort busy extents by the AG they reside in.
 */
int
xfs_extent_busy_ag_cmp(
        void *priv,
        const struct list_head *l1,
        const struct list_head *l2)
{
        struct xfs_extent_busy *b1 =
                container_of(l1, struct xfs_extent_busy, list);
        struct xfs_extent_busy *b2 =
                container_of(l2, struct xfs_extent_busy, list);
        s32 diff;

        diff = b1->agno - b2->agno;
        if (!diff)
                diff = b1->bno - b2->bno;
        return diff;
}