// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}
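
/*
 * Read every journal set stored in one journal bucket, validating magic,
 * size and checksum, and splice the surviving entries into 'list' in
 * ascending order of j->seq. Returns 1 if at least one entry was added,
 * 0 if the bucket held no further valid entries, or -ENOMEM.
 */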
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned int bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned int len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u\n", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio, ca->bdev, REQ_OP_READ);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bch_bio_map(bio, data);

		closure_bio_submit(ca->set, bio, &cl);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic\n", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u\n",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u\n",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca));

			/*
			 * Nodes in 'list' are in linear increasing order of
			 * i->j.seq, the node on head has the smallest (oldest)
			 * journal seq, the node on tail has the biggest
			 * (latest) journal seq.
			 */

			/*
			 * Check from the oldest jset for last_seq. If
			 * i->j.seq < j->last_seq, it means the oldest jset
			 * in list is expired and useless, remove it from
			 * this list. Otherwise, j is a candidate jset for
			 * further following checks.
			 */
			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			/* iterate list in reverse order (from latest jset) */
			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				/*
				 * if j->seq is less than any i->j.last_seq
				 * in list, j is an expired and useless jset.
				 */
				if (j->seq < i->j.last_seq)
					goto next_set;

				/*
				 * 'where' points to the first jset in the list
				 * which is older than j.
				 */
				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			unsafe_memcpy(&i->j, j, bytes,
				/* "bytes" was calculated by set_bytes() above */);
			/* Add to the location after 'where' points to */
			list_add(&i->list, where);
			ret = 1;

			if (j->seq > ja->seq[bucket_index])
				ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}
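
/*
 * Locate the journal buckets that contain valid entries and build the
 * replay list: probe buckets in golden ratio hash order, fall back to a
 * linear scan, then binary search for the newest bucket and walk
 * backwards until no more entries are found.
 */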
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)						\
	({							\
		ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);				\
		if (ret < 0)					\
			return ret;				\
		ret;						\
	})

	struct cache *ca = c->cache;
	int ret = 0;
	struct journal_device *ja = &ca->journal;
	DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
	unsigned int i, l, r, m;
	uint64_t seq;

	bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
	pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);

	/*
	 * Read journal buckets ordered by golden ratio hash to quickly
	 * find a sequence of buckets with valid journal entries
	 */
	for (i = 0; i < ca->sb.njournal_buckets; i++) {
		/*
		 * We must try the index l with ZERO first for
		 * correctness, because the journal buckets form a
		 * circular buffer which might have wrapped
		 */
		l = (i * 2654435769U) % ca->sb.njournal_buckets;

		if (test_bit(l, bitmap))
			break;

		if (read_bucket(l))
			goto bsearch;
	}

	/*
	 * If that fails, check all the buckets we haven't checked
	 * already
	 */
	pr_debug("falling back to linear search\n");

	for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
		if (read_bucket(l))
			goto bsearch;

	/* no journal entries on this device? */
	if (l == ca->sb.njournal_buckets)
		goto out;
bsearch:
	BUG_ON(list_empty(list));

	/* Binary search */
	m = l;
	r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
	pr_debug("starting binary search, l %u r %u\n", l, r);

	while (l + 1 < r) {
		seq = list_entry(list->prev, struct journal_replay,
				 list)->j.seq;

		m = (l + r) >> 1;
		read_bucket(m);

		if (seq != list_entry(list->prev, struct journal_replay,
				      list)->j.seq)
			l = m;
		else
			r = m;
	}

	/*
	 * Read buckets in reverse order until we stop finding more
	 * journal entries
	 */
	pr_debug("finishing up: m %u njournal_buckets %u\n",
		 m, ca->sb.njournal_buckets);
	l = m;

	while (1) {
		if (!l--)
			l = ca->sb.njournal_buckets - 1;

		if (l == m)
			break;

		if (test_bit(l, bitmap))
			continue;

		if (!read_bucket(l))
			break;
	}

	seq = 0;

	for (i = 0; i < ca->sb.njournal_buckets; i++)
		if (ja->seq[i] > seq) {
			seq = ja->seq[i];
			/*
			 * When journal_reclaim() goes to allocate for
			 * the first time, it'll use the bucket after
			 * ja->cur_idx
			 */
			ja->cur_idx = i;
			ja->last_idx = ja->discard_idx = (i + 1) %
				ca->sb.njournal_buckets;
		}

out:
	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}
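
/*
 * Walk the replay list (newest to oldest), set up journal pin refcounts
 * for each entry, and bump the bucket pin counts for every valid extent
 * key so the allocator won't reuse those buckets before replay finishes.
 */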
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned int j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}

static bool is_discard_enabled(struct cache_set *s)
{
	struct cache *ca = s->cache;

	if (ca->discard)
		return true;

	return false;
}
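
/*
 * Reinsert the journalled keys into the btree in the order they appear
 * in the replay list, checking for gaps in the sequence numbers, then
 * free the list.
 */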
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		if (n != i->j.seq) {
			if (n == start && is_discard_enabled(s))
				pr_info("journal entries %llu-%llu may be discarded! (replaying %llu-%llu)\n",
					n, i->j.seq - 1, start, end);
			else {
				pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
					n, i->j.seq - 1, start, end);
				ret = -EIO;
				goto err;
			}
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu\n",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

void bch_journal_space_reserve(struct journal *j)
{
	j->do_reserve = true;
}

/* Journalling */
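
/*
 * Pick up to BTREE_FLUSH_NR dirty btree nodes that reference the oldest
 * journal entry and write them out, so that entry's pin can drop to zero
 * and journal_reclaim() can reuse its bucket.
 */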
static void btree_flush_write(struct cache_set *c)
{
	struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
	unsigned int i, nr;
	int ref_nr;
	atomic_t *fifo_front_p, *now_fifo_front_p;
	size_t mask;

	if (c->journal.btree_flushing)
		return;

	spin_lock(&c->journal.flush_write_lock);
	if (c->journal.btree_flushing) {
		spin_unlock(&c->journal.flush_write_lock);
		return;
	}
	c->journal.btree_flushing = true;
	spin_unlock(&c->journal.flush_write_lock);

	/* get the oldest journal entry and check its refcount */
	spin_lock(&c->journal.lock);
	fifo_front_p = &fifo_front(&c->journal.pin);
	ref_nr = atomic_read(fifo_front_p);
	if (ref_nr <= 0) {
		/*
		 * do nothing if no btree node references
		 * the oldest journal entry
		 */
		spin_unlock(&c->journal.lock);
		goto out;
	}
	spin_unlock(&c->journal.lock);

	mask = c->journal.pin.mask;
	nr = 0;
	atomic_long_inc(&c->flush_write);
	memset(btree_nodes, 0, sizeof(btree_nodes));

	mutex_lock(&c->bucket_lock);
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		/*
		 * It is safe to get now_fifo_front_p without holding
		 * c->journal.lock here, because we don't need the exact
		 * value; we only check whether the front pointer of
		 * c->journal.pin has changed.
		 */
		now_fifo_front_p = &fifo_front(&c->journal.pin);
		/*
		 * If the oldest journal entry is reclaimed and the front
		 * pointer of c->journal.pin changes, it is unnecessary
		 * to scan c->btree_cache anymore, just quit the loop and
		 * flush out what we have already.
		 */
		if (now_fifo_front_p != fifo_front_p)
			break;
		/*
		 * quit this loop if all matching btree nodes are
		 * scanned and recorded in btree_nodes[] already.
		 */
		ref_nr = atomic_read(fifo_front_p);
		if (nr >= ref_nr)
			break;

		if (btree_node_journal_flush(b))
			pr_err("BUG: flush_write bit should not be set here!\n");

		mutex_lock(&b->write_lock);

		if (!btree_node_dirty(b)) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		/*
		 * Only select the btree node which exactly references
		 * the oldest journal entry.
		 *
		 * If the journal entry pointed by fifo_front_p is
		 * reclaimed in parallel, don't worry:
		 * - the list_for_each_xxx loop will quit when checking
		 *   next now_fifo_front_p.
		 * - If there are matched nodes recorded in btree_nodes[],
		 *   they are clean now (this is why and how the oldest
		 *   journal entry can be reclaimed). These selected nodes
		 *   will be ignored and skipped in the following for-loop.
		 */
		if (((btree_current_write(b)->journal - fifo_front_p) &
		     mask) != 0) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		set_btree_node_journal_flush(b);

		mutex_unlock(&b->write_lock);

		btree_nodes[nr++] = b;
		/*
		 * To avoid holding c->bucket_lock for too long, scan for
		 * at most BTREE_FLUSH_NR matching btree nodes. If more
		 * btree nodes reference the oldest journal entry, try to
		 * flush them the next time btree_flush_write() is called.
		 */
		if (nr == BTREE_FLUSH_NR)
			break;
	}
	mutex_unlock(&c->bucket_lock);

	for (i = 0; i < nr; i++) {
		b = btree_nodes[i];
		if (!b) {
			pr_err("BUG: btree_nodes[%d] is NULL\n", i);
			continue;
		}

		/* safe to check without holding b->write_lock */
		if (!btree_node_journal_flush(b)) {
			pr_err("BUG: bnode %p: journal_flush bit cleaned\n", b);
			continue;
		}

		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: written by others\n", b);
			continue;
		}

		if (!btree_node_dirty(b)) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: dirty bit cleaned by others\n", b);
			continue;
		}

		__bch_btree_node_write(b, NULL);
		clear_bit(BTREE_NODE_journal_flush, &b->flags);
		mutex_unlock(&b->write_lock);
	}

out:
	spin_lock(&c->journal.flush_write_lock);
	c->journal.btree_flushing = false;
	spin_unlock(&c->journal.flush_write_lock);
}

#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}
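
/*
 * Issue a discard for the next reclaimed journal bucket when discards
 * are enabled, driven by a small state machine (DISCARD_READY ->
 * DISCARD_IN_FLIGHT -> DISCARD_DONE) so only one discard is in flight
 * per device at a time.
 */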
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		fallthrough;

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		queue_work(bch_journal_wq, &ja->discard_work);
	}
}
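
/*
 * Number of journal buckets available for new writes: the distance from
 * cur_idx forward to discard_idx (modulo njournal_buckets), minus one
 * bucket always held back and one more while do_reserve is set.
 */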
static unsigned int free_journal_buckets(struct cache_set *c)
{
	struct journal *j = &c->journal;
	struct cache *ca = c->cache;
	struct journal_device *ja = &c->cache->journal;
	unsigned int n;

	/* In case njournal_buckets is not a power of 2 */
	if (ja->cur_idx >= ja->discard_idx)
		n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
	else
		n = ja->discard_idx - ja->cur_idx;

	if (n > (1 + j->do_reserve))
		return n - (1 + j->do_reserve);

	return 0;
}
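
/*
 * Drop journal pins that have reached zero, advance last_idx (and kick
 * off discards) past buckets whose entries are no longer needed, and if
 * the current bucket has no blocks free, move cur_idx to the next free
 * bucket and point journal.key at it.
 */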
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca = c->cache;
	uint64_t last_seq;
	struct journal_device *ja = &ca->journal;
	atomic_t p __maybe_unused;

	atomic_long_inc(&c->reclaim);

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	while (ja->last_idx != ja->cur_idx &&
	       ja->seq[ja->last_idx] < last_seq)
		ja->last_idx = (ja->last_idx + 1) %
			ca->sb.njournal_buckets;

	do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	if (!free_journal_buckets(c))
		goto out;

	ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
	k->ptr[0] = MAKE_PTR(0,
			     bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
			     ca->sb.nr_this_dev);
	atomic_long_inc(&c->reclaimed_journal_buckets);

	bkey_init(k);
	SET_KEY_PTRS(k, 1);
	c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;

out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}
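
/*
 * Swap to the other in-memory journal write buffer and push a new pin
 * for the journal entry that will be written into it.
 */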
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)\n", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *cl);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}

static void journal_write_unlock(struct closure *cl)
	__releases(&c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}
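
/*
 * Build and submit the actual journal write: fill in the jset header
 * (btree root, uuid bucket, prio buckets, last_seq, csum), queue one bio
 * per key pointer, advance to the next journal entry, then submit the
 * bios. Called with journal.lock held; drops it.
 */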
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca = c->cache;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
		ca->sb.block_size;

	struct bio *bio;
	struct bio_list list;

	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, bch_journal_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
	w->data->magic		= jset_magic(&ca->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = c->cache;
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio, ca->bdev, REQ_OP_WRITE |
			  REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio->bi_iter.bi_size	= sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio, w->data->keys);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	/* If KEY_PTRS(k) == 0, this jset gets lost in air */
	BUG_ON(i == 0);

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(c, bio, cl);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}
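
/*
 * Return the current journal write with enough room for 'nkeys' more
 * keys, blocking and triggering journal writes, reclaim and btree
 * flushes as needed. Returns with journal.lock held.
 */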
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned int nkeys)
	__acquires(&c->journal.lock)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;
	struct cache *ca = c->cache;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(ca)) * ca->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * ca->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}

static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then bch_journal()
 * hands those same keys off to btree_insert_async().
 */

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	/* No journalling if CACHE_SET_IO_DISABLE is already set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return NULL;

	if (!CACHE_SYNC(&c->cache->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		queue_delayed_work(bch_flush_wq, &c->journal.work,
				   msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}
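
/*
 * Journal an empty keylist: forces a journal entry that records the
 * current btree root and other metadata in the jset header, and drops
 * the pin reference bch_journal() took.
 */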
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	spin_lock_init(&j->flush_write_lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
		return -ENOMEM;

	return 0;
}