dm-log.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896
  1. /*
  2. * Copyright (C) 2003 Sistina Software
  3. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  4. *
  5. * This file is released under the LGPL.
  6. */
  7. #include <linux/init.h>
  8. #include <linux/slab.h>
  9. #include <linux/module.h>
  10. #include <linux/vmalloc.h>
  11. #include <linux/dm-io.h>
  12. #include <linux/dm-dirty-log.h>
  13. #include <linux/device-mapper.h>
  14. #define DM_MSG_PREFIX "dirty region log"
  15. static LIST_HEAD(_log_types);
  16. static DEFINE_SPINLOCK(_lock);
  17. static struct dm_dirty_log_type *__find_dirty_log_type(const char *name)
  18. {
  19. struct dm_dirty_log_type *log_type;
  20. list_for_each_entry(log_type, &_log_types, list)
  21. if (!strcmp(name, log_type->name))
  22. return log_type;
  23. return NULL;
  24. }
  25. static struct dm_dirty_log_type *_get_dirty_log_type(const char *name)
  26. {
  27. struct dm_dirty_log_type *log_type;
  28. spin_lock(&_lock);
  29. log_type = __find_dirty_log_type(name);
  30. if (log_type && !try_module_get(log_type->module))
  31. log_type = NULL;
  32. spin_unlock(&_lock);
  33. return log_type;
  34. }
/*
 * get_type
 * @type_name
 *
 * Attempt to retrieve the dm_dirty_log_type by name. If not already
 * available, attempt to load the appropriate module.
 *
 * Log modules are named "dm-log-" followed by the 'type_name'.
 * Modules may contain multiple types.
 * This function will first try the module "dm-log-<type_name>",
 * then truncate 'type_name' on the last '-' and try again.
 *
 * For example, if type_name was "clustered-disk", it would search
 * 'dm-log-clustered-disk' then 'dm-log-clustered'.
 *
 * Returns: dirty_log_type* on success, NULL on failure
 */
static struct dm_dirty_log_type *get_type(const char *type_name)
{
	char *p, *type_name_dup;
	struct dm_dirty_log_type *log_type;

	if (!type_name)
		return NULL;

	/* Fast path: the type is already registered. */
	log_type = _get_dirty_log_type(type_name);
	if (log_type)
		return log_type;

	/* Duplicate so truncation at '-' doesn't touch the caller's string. */
	type_name_dup = kstrdup(type_name, GFP_KERNEL);
	if (!type_name_dup) {
		DMWARN("No memory left to attempt log module load for \"%s\"",
		       type_name);
		return NULL;
	}

	/*
	 * Try progressively shorter module names; the registry lookup is
	 * always done with the full, untruncated type_name.
	 */
	while (request_module("dm-log-%s", type_name_dup) ||
	       !(log_type = _get_dirty_log_type(type_name))) {
		p = strrchr(type_name_dup, '-');
		if (!p)
			break;
		p[0] = '\0';
	}

	if (!log_type)
		DMWARN("Module for logging type \"%s\" not found.", type_name);

	kfree(type_name_dup);

	return log_type;
}
  79. static void put_type(struct dm_dirty_log_type *type)
  80. {
  81. if (!type)
  82. return;
  83. spin_lock(&_lock);
  84. if (!__find_dirty_log_type(type->name))
  85. goto out;
  86. module_put(type->module);
  87. out:
  88. spin_unlock(&_lock);
  89. }
  90. int dm_dirty_log_type_register(struct dm_dirty_log_type *type)
  91. {
  92. int r = 0;
  93. spin_lock(&_lock);
  94. if (!__find_dirty_log_type(type->name))
  95. list_add(&type->list, &_log_types);
  96. else
  97. r = -EEXIST;
  98. spin_unlock(&_lock);
  99. return r;
  100. }
  101. EXPORT_SYMBOL(dm_dirty_log_type_register);
  102. int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
  103. {
  104. spin_lock(&_lock);
  105. if (!__find_dirty_log_type(type->name)) {
  106. spin_unlock(&_lock);
  107. return -EINVAL;
  108. }
  109. list_del(&type->list);
  110. spin_unlock(&_lock);
  111. return 0;
  112. }
  113. EXPORT_SYMBOL(dm_dirty_log_type_unregister);
  114. struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
  115. struct dm_target *ti,
  116. int (*flush_callback_fn)(struct dm_target *ti),
  117. unsigned int argc, char **argv)
  118. {
  119. struct dm_dirty_log_type *type;
  120. struct dm_dirty_log *log;
  121. log = kmalloc(sizeof(*log), GFP_KERNEL);
  122. if (!log)
  123. return NULL;
  124. type = get_type(type_name);
  125. if (!type) {
  126. kfree(log);
  127. return NULL;
  128. }
  129. log->flush_callback_fn = flush_callback_fn;
  130. log->type = type;
  131. if (type->ctr(log, ti, argc, argv)) {
  132. kfree(log);
  133. put_type(type);
  134. return NULL;
  135. }
  136. return log;
  137. }
  138. EXPORT_SYMBOL(dm_dirty_log_create);
/*
 * Tear down a log created by dm_dirty_log_create(): run the type's
 * destructor, drop the module reference, then free the handle.
 */
void dm_dirty_log_destroy(struct dm_dirty_log *log)
{
	log->type->dtr(log);
	put_type(log->type);
	kfree(log);
}
EXPORT_SYMBOL(dm_dirty_log_destroy);
/*-----------------------------------------------------------------
 * Persistent and core logs share a lot of their implementation.
 * FIXME: need a reload method to be called from a resume
 *---------------------------------------------------------------*/
/*
 * Magic for persistent mirrors: "MiRr"
 */
#define MIRROR_MAGIC 0x4D695272
/*
 * The on-disk version of the metadata.
 */
#define MIRROR_DISK_VERSION 2
#define LOG_OFFSET 2	/* bitset starts this many sectors into the buffer */

struct log_header_disk {
	__le32 magic;

	/*
	 * Simple, incrementing version. no backward
	 * compatibility.
	 */
	__le32 version;
	__le64 nr_regions;
} __packed;

/* CPU byte-order twin of log_header_disk. */
struct log_header_core {
	uint32_t magic;
	uint32_t version;
	uint64_t nr_regions;
};

struct log_c {
	struct dm_target *ti;
	int touched_dirtied;	/* a clean bit was cleared since last flush */
	int touched_cleaned;	/* a clean bit was set since last flush */
	int flush_failed;	/* once set, core_clear_region() is disabled */
	uint32_t region_size;
	unsigned int region_count;
	region_t sync_count;

	unsigned int bitset_uint32_count;	/* size of each bitset, in u32 words */
	uint32_t *clean_bits;
	uint32_t *sync_bits;
	uint32_t *recovering_bits;	/* FIXME: this seems excessive */

	int sync_search;	/* resume point for core_get_resync_work() */

	/* Resync flag */
	enum sync {
		DEFAULTSYNC,	/* Synchronize if necessary */
		NOSYNC,		/* Devices known to be already in sync */
		FORCESYNC,	/* Force a sync to happen */
	} sync;

	struct dm_io_request io_req;

	/*
	 * Disk log fields
	 */
	int log_dev_failed;
	int log_dev_flush_failed;
	struct dm_dev *log_dev;
	struct log_header_core header;

	struct dm_io_region header_location;
	/* For disk logs this buffer holds the header AND clean_bits. */
	struct log_header_disk *disk_header;
};
/*
 * The touched member needs to be updated every time we access
 * one of the bitsets.
 */
static inline int log_test_bit(uint32_t *bs, unsigned int bit)
{
	return test_bit_le(bit, bs) ? 1 : 0;
}

/* Set a bit; records that a region was cleaned so disk_flush() writes out. */
static inline void log_set_bit(struct log_c *l,
			       uint32_t *bs, unsigned int bit)
{
	__set_bit_le(bit, bs);
	l->touched_cleaned = 1;
}

/* Clear a bit; records that a region was dirtied so disk_flush() writes out. */
static inline void log_clear_bit(struct log_c *l,
				 uint32_t *bs, unsigned int bit)
{
	__clear_bit_le(bit, bs);
	l->touched_dirtied = 1;
}
/*----------------------------------------------------------------
 * Header IO
 *--------------------------------------------------------------*/

/* Convert the in-core header to its little-endian on-disk layout. */
static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk)
{
	disk->magic = cpu_to_le32(core->magic);
	disk->version = cpu_to_le32(core->version);
	disk->nr_regions = cpu_to_le64(core->nr_regions);
}

/* Convert an on-disk (little-endian) header to CPU byte order. */
static void header_from_disk(struct log_header_core *core, struct log_header_disk *disk)
{
	core->magic = le32_to_cpu(disk->magic);
	core->version = le32_to_cpu(disk->version);
	core->nr_regions = le64_to_cpu(disk->nr_regions);
}
/* Synchronously read or write the whole header area (header + bitset). */
static int rw_header(struct log_c *lc, enum req_op op)
{
	lc->io_req.bi_opf = op;

	return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
/*
 * Issue an empty PREFLUSH write (count = 0) to the log device so that
 * previously written header/bitset data reaches stable storage.
 */
static int flush_header(struct log_c *lc)
{
	struct dm_io_region null_location = {
		.bdev = lc->header_location.bdev,
		.sector = 0,
		.count = 0,
	};

	lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	return dm_io(&lc->io_req, 1, &null_location, NULL);
}
/*
 * Read and validate the on-disk header into log->header.
 *
 * If a sync mode was forced on the table line, or the magic does not
 * match, the header is re-initialised in core (nr_regions = 0) and
 * will be written back fresh by disk_resume().
 *
 * Returns 0 on success, -EINVAL on version mismatch, or the dm_io
 * error from reading the device.
 */
static int read_header(struct log_c *log)
{
	int r;

	r = rw_header(log, REQ_OP_READ);
	if (r)
		return r;

	header_from_disk(&log->header, log->disk_header);

	/* New log required? */
	if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
		log->header.magic = MIRROR_MAGIC;
		log->header.version = MIRROR_DISK_VERSION;
		log->header.nr_regions = 0;
	}

#ifdef __LITTLE_ENDIAN
	/*
	 * A version-1 header on a little-endian host has the same byte
	 * layout as version 2, so it can be upgraded in place.
	 */
	if (log->header.version == 1)
		log->header.version = 2;
#endif

	if (log->header.version != MIRROR_DISK_VERSION) {
		DMWARN("incompatible disk log version");
		return -EINVAL;
	}

	return 0;
}
  276. static int _check_region_size(struct dm_target *ti, uint32_t region_size)
  277. {
  278. if (region_size < 2 || region_size > ti->len)
  279. return 0;
  280. if (!is_power_of_2(region_size))
  281. return 0;
  282. return 1;
  283. }
  284. /*----------------------------------------------------------------
  285. * core log constructor/destructor
  286. *
  287. * argv contains region_size followed optionally by [no]sync
  288. *--------------------------------------------------------------*/
  289. #define BYTE_SHIFT 3
  290. static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
  291. unsigned int argc, char **argv,
  292. struct dm_dev *dev)
  293. {
  294. enum sync sync = DEFAULTSYNC;
  295. struct log_c *lc;
  296. uint32_t region_size;
  297. unsigned int region_count;
  298. size_t bitset_size, buf_size;
  299. int r;
  300. char dummy;
  301. if (argc < 1 || argc > 2) {
  302. DMWARN("wrong number of arguments to dirty region log");
  303. return -EINVAL;
  304. }
  305. if (argc > 1) {
  306. if (!strcmp(argv[1], "sync"))
  307. sync = FORCESYNC;
  308. else if (!strcmp(argv[1], "nosync"))
  309. sync = NOSYNC;
  310. else {
  311. DMWARN("unrecognised sync argument to dirty region log: %s", argv[1]);
  312. return -EINVAL;
  313. }
  314. }
  315. if (sscanf(argv[0], "%u%c", &region_size, &dummy) != 1 ||
  316. !_check_region_size(ti, region_size)) {
  317. DMWARN("invalid region size %s", argv[0]);
  318. return -EINVAL;
  319. }
  320. region_count = dm_sector_div_up(ti->len, region_size);
  321. lc = kmalloc(sizeof(*lc), GFP_KERNEL);
  322. if (!lc) {
  323. DMWARN("couldn't allocate core log");
  324. return -ENOMEM;
  325. }
  326. lc->ti = ti;
  327. lc->touched_dirtied = 0;
  328. lc->touched_cleaned = 0;
  329. lc->flush_failed = 0;
  330. lc->region_size = region_size;
  331. lc->region_count = region_count;
  332. lc->sync = sync;
  333. /*
  334. * Work out how many "unsigned long"s we need to hold the bitset.
  335. */
  336. bitset_size = dm_round_up(region_count, BITS_PER_LONG);
  337. bitset_size >>= BYTE_SHIFT;
  338. lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
  339. /*
  340. * Disk log?
  341. */
  342. if (!dev) {
  343. lc->clean_bits = vmalloc(bitset_size);
  344. if (!lc->clean_bits) {
  345. DMWARN("couldn't allocate clean bitset");
  346. kfree(lc);
  347. return -ENOMEM;
  348. }
  349. lc->disk_header = NULL;
  350. } else {
  351. lc->log_dev = dev;
  352. lc->log_dev_failed = 0;
  353. lc->log_dev_flush_failed = 0;
  354. lc->header_location.bdev = lc->log_dev->bdev;
  355. lc->header_location.sector = 0;
  356. /*
  357. * Buffer holds both header and bitset.
  358. */
  359. buf_size =
  360. dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size,
  361. bdev_logical_block_size(lc->header_location.
  362. bdev));
  363. if (buf_size > bdev_nr_bytes(dev->bdev)) {
  364. DMWARN("log device %s too small: need %llu bytes",
  365. dev->name, (unsigned long long)buf_size);
  366. kfree(lc);
  367. return -EINVAL;
  368. }
  369. lc->header_location.count = buf_size >> SECTOR_SHIFT;
  370. lc->io_req.mem.type = DM_IO_VMA;
  371. lc->io_req.notify.fn = NULL;
  372. lc->io_req.client = dm_io_client_create();
  373. if (IS_ERR(lc->io_req.client)) {
  374. r = PTR_ERR(lc->io_req.client);
  375. DMWARN("couldn't allocate disk io client");
  376. kfree(lc);
  377. return r;
  378. }
  379. lc->disk_header = vmalloc(buf_size);
  380. if (!lc->disk_header) {
  381. DMWARN("couldn't allocate disk log buffer");
  382. dm_io_client_destroy(lc->io_req.client);
  383. kfree(lc);
  384. return -ENOMEM;
  385. }
  386. lc->io_req.mem.ptr.vma = lc->disk_header;
  387. lc->clean_bits = (void *)lc->disk_header +
  388. (LOG_OFFSET << SECTOR_SHIFT);
  389. }
  390. memset(lc->clean_bits, -1, bitset_size);
  391. lc->sync_bits = vmalloc(bitset_size);
  392. if (!lc->sync_bits) {
  393. DMWARN("couldn't allocate sync bitset");
  394. if (!dev)
  395. vfree(lc->clean_bits);
  396. else
  397. dm_io_client_destroy(lc->io_req.client);
  398. vfree(lc->disk_header);
  399. kfree(lc);
  400. return -ENOMEM;
  401. }
  402. memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
  403. lc->sync_count = (sync == NOSYNC) ? region_count : 0;
  404. lc->recovering_bits = vzalloc(bitset_size);
  405. if (!lc->recovering_bits) {
  406. DMWARN("couldn't allocate sync bitset");
  407. vfree(lc->sync_bits);
  408. if (!dev)
  409. vfree(lc->clean_bits);
  410. else
  411. dm_io_client_destroy(lc->io_req.client);
  412. vfree(lc->disk_header);
  413. kfree(lc);
  414. return -ENOMEM;
  415. }
  416. lc->sync_search = 0;
  417. log->context = lc;
  418. return 0;
  419. }
/* Core (memory-only) log constructor: no backing device. */
static int core_ctr(struct dm_dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
{
	return create_log_context(log, ti, argc, argv, NULL);
}
/*
 * Free the state common to core and disk logs.  clean_bits and the
 * disk-only resources are released by the respective destructors.
 */
static void destroy_log_context(struct log_c *lc)
{
	vfree(lc->sync_bits);
	vfree(lc->recovering_bits);
	kfree(lc);
}
static void core_dtr(struct dm_dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	/* Core logs own a separate clean_bits buffer (disk logs embed
	 * theirs in disk_header, freed by disk_dtr instead). */
	vfree(lc->clean_bits);
	destroy_log_context(lc);
}
  437. /*----------------------------------------------------------------
  438. * disk log constructor/destructor
  439. *
  440. * argv contains log_device region_size followed optionally by [no]sync
  441. *--------------------------------------------------------------*/
  442. static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
  443. unsigned int argc, char **argv)
  444. {
  445. int r;
  446. struct dm_dev *dev;
  447. if (argc < 2 || argc > 3) {
  448. DMWARN("wrong number of arguments to disk dirty region log");
  449. return -EINVAL;
  450. }
  451. r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
  452. if (r)
  453. return r;
  454. r = create_log_context(log, ti, argc - 1, argv + 1, dev);
  455. if (r) {
  456. dm_put_device(ti, dev);
  457. return r;
  458. }
  459. return 0;
  460. }
static void disk_dtr(struct dm_dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	dm_put_device(lc->ti, lc->log_dev);
	vfree(lc->disk_header);	/* also frees the embedded clean_bits */
	dm_io_client_destroy(lc->io_req.client);
	destroy_log_context(lc);
}
  469. static void fail_log_device(struct log_c *lc)
  470. {
  471. if (lc->log_dev_failed)
  472. return;
  473. lc->log_dev_failed = 1;
  474. dm_table_event(lc->ti->table);
  475. }
/*
 * Bring a disk log up to date on resume: re-read the header, adjust
 * the bitsets for any change in region count, then write the header
 * and bitset back out and flush them.
 *
 * Returns 0 on success or a negative error; I/O failures also mark
 * the log device failed.
 */
static int disk_resume(struct dm_dirty_log *log)
{
	int r;
	unsigned int i;
	struct log_c *lc = (struct log_c *) log->context;
	size_t size = lc->bitset_uint32_count * sizeof(uint32_t);

	/* read the disk header */
	r = read_header(lc);
	if (r) {
		DMWARN("%s: Failed to read header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
		/*
		 * If the log device cannot be read, we must assume
		 * all regions are out-of-sync. If we simply return
		 * here, the state will be uninitialized and could
		 * lead us to return 'in-sync' status for regions
		 * that are actually 'out-of-sync'.
		 */
		lc->header.nr_regions = 0;
	}

	/* set or clear any new bits -- device has grown */
	if (lc->sync == NOSYNC)
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_set_bit(lc, lc->clean_bits, i);
	else
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_clear_bit(lc, lc->clean_bits, i);

	/* clear any old bits -- device has shrunk */
	for (i = lc->region_count; i % BITS_PER_LONG; i++)
		log_clear_bit(lc, lc->clean_bits, i);

	/* copy clean across to sync */
	memcpy(lc->sync_bits, lc->clean_bits, size);
	lc->sync_count = memweight(lc->clean_bits,
				lc->bitset_uint32_count * sizeof(uint32_t));
	lc->sync_search = 0;

	/* set the correct number of regions in the header */
	lc->header.nr_regions = lc->region_count;

	header_to_disk(&lc->header, lc->disk_header);

	/* write the new header */
	r = rw_header(lc, REQ_OP_WRITE);
	if (!r) {
		r = flush_header(lc);
		if (r)
			lc->log_dev_flush_failed = 1;
	}
	if (r) {
		DMWARN("%s: Failed to write header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
	}

	return r;
}
  531. static uint32_t core_get_region_size(struct dm_dirty_log *log)
  532. {
  533. struct log_c *lc = (struct log_c *) log->context;
  534. return lc->region_size;
  535. }
  536. static int core_resume(struct dm_dirty_log *log)
  537. {
  538. struct log_c *lc = (struct log_c *) log->context;
  539. lc->sync_search = 0;
  540. return 0;
  541. }
  542. static int core_is_clean(struct dm_dirty_log *log, region_t region)
  543. {
  544. struct log_c *lc = (struct log_c *) log->context;
  545. return log_test_bit(lc->clean_bits, region);
  546. }
  547. static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
  548. {
  549. struct log_c *lc = (struct log_c *) log->context;
  550. return log_test_bit(lc->sync_bits, region);
  551. }
static int core_flush(struct dm_dirty_log *log)
{
	/* no op: a core log has no persistent state to flush */
	return 0;
}
  557. static int disk_flush(struct dm_dirty_log *log)
  558. {
  559. int r, i;
  560. struct log_c *lc = log->context;
  561. /* only write if the log has changed */
  562. if (!lc->touched_cleaned && !lc->touched_dirtied)
  563. return 0;
  564. if (lc->touched_cleaned && log->flush_callback_fn &&
  565. log->flush_callback_fn(lc->ti)) {
  566. /*
  567. * At this point it is impossible to determine which
  568. * regions are clean and which are dirty (without
  569. * re-reading the log off disk). So mark all of them
  570. * dirty.
  571. */
  572. lc->flush_failed = 1;
  573. for (i = 0; i < lc->region_count; i++)
  574. log_clear_bit(lc, lc->clean_bits, i);
  575. }
  576. r = rw_header(lc, REQ_OP_WRITE);
  577. if (r)
  578. fail_log_device(lc);
  579. else {
  580. if (lc->touched_dirtied) {
  581. r = flush_header(lc);
  582. if (r) {
  583. lc->log_dev_flush_failed = 1;
  584. fail_log_device(lc);
  585. } else
  586. lc->touched_dirtied = 0;
  587. }
  588. lc->touched_cleaned = 0;
  589. }
  590. return r;
  591. }
  592. static void core_mark_region(struct dm_dirty_log *log, region_t region)
  593. {
  594. struct log_c *lc = (struct log_c *) log->context;
  595. log_clear_bit(lc, lc->clean_bits, region);
  596. }
  597. static void core_clear_region(struct dm_dirty_log *log, region_t region)
  598. {
  599. struct log_c *lc = (struct log_c *) log->context;
  600. if (likely(!lc->flush_failed))
  601. log_set_bit(lc, lc->clean_bits, region);
  602. }
/*
 * Hand out the next region needing resync, skipping regions already
 * being recovered.  Returns 1 with *region set (and its recovering
 * bit raised), or 0 when nothing is left to do.
 */
static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
{
	struct log_c *lc = (struct log_c *) log->context;

	if (lc->sync_search >= lc->region_count)
		return 0;

	do {
		*region = find_next_zero_bit_le(lc->sync_bits,
						lc->region_count,
						lc->sync_search);
		/* remember where to continue on the next call */
		lc->sync_search = *region + 1;

		if (*region >= lc->region_count)
			return 0;

	} while (log_test_bit(lc->recovering_bits, *region));

	log_set_bit(lc, lc->recovering_bits, *region);
	return 1;
}
/*
 * Recovery of @region finished: drop its recovering bit and update
 * the sync bitset and count according to the outcome.
 */
static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
				 int in_sync)
{
	struct log_c *lc = (struct log_c *) log->context;

	log_clear_bit(lc, lc->recovering_bits, region);
	if (in_sync) {
		log_set_bit(lc, lc->sync_bits, region);
		lc->sync_count++;
	} else if (log_test_bit(lc->sync_bits, region)) {
		/* only decrement when the bit was actually set */
		lc->sync_count--;
		log_clear_bit(lc, lc->sync_bits, region);
	}
}
  632. static region_t core_get_sync_count(struct dm_dirty_log *log)
  633. {
  634. struct log_c *lc = (struct log_c *) log->context;
  635. return lc->sync_count;
  636. }
  637. #define DMEMIT_SYNC \
  638. if (lc->sync != DEFAULTSYNC) \
  639. DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
  640. static int core_status(struct dm_dirty_log *log, status_type_t status,
  641. char *result, unsigned int maxlen)
  642. {
  643. int sz = 0;
  644. struct log_c *lc = log->context;
  645. switch(status) {
  646. case STATUSTYPE_INFO:
  647. DMEMIT("1 %s", log->type->name);
  648. break;
  649. case STATUSTYPE_TABLE:
  650. DMEMIT("%s %u %u ", log->type->name,
  651. lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size);
  652. DMEMIT_SYNC;
  653. break;
  654. case STATUSTYPE_IMA:
  655. *result = '\0';
  656. break;
  657. }
  658. return sz;
  659. }
/*
 * Status for the disk log.  INFO additionally reports the log device
 * and its health: 'F' flush failed, 'D' device failed, 'A' alive.
 */
static int disk_status(struct dm_dirty_log *log, status_type_t status,
		       char *result, unsigned int maxlen)
{
	int sz = 0;
	struct log_c *lc = log->context;

	switch(status) {
	case STATUSTYPE_INFO:
		DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
		       lc->log_dev_flush_failed ? 'F' :
		       lc->log_dev_failed ? 'D' :
		       'A');
		break;

	case STATUSTYPE_TABLE:
		/* argc is 2 without a sync argument, 3 with one */
		DMEMIT("%s %u %s %u ", log->type->name,
		       lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
		       lc->region_size);
		DMEMIT_SYNC;
		break;

	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}

	return sz;
}
/* Memory-only dirty log: state is lost across suspend/resume cycles. */
static struct dm_dirty_log_type _core_type = {
	.name = "core",
	.module = THIS_MODULE,
	.ctr = core_ctr,
	.dtr = core_dtr,
	.resume = core_resume,
	.get_region_size = core_get_region_size,
	.is_clean = core_is_clean,
	.in_sync = core_in_sync,
	.flush = core_flush,
	.mark_region = core_mark_region,
	.clear_region = core_clear_region,
	.get_resync_work = core_get_resync_work,
	.set_region_sync = core_set_region_sync,
	.get_sync_count = core_get_sync_count,
	.status = core_status,
};
/*
 * Disk-backed dirty log; shares most region bookkeeping with the core
 * type.  disk_flush doubles as the postsuspend hook so the bitset is
 * written out before the target is suspended.
 */
static struct dm_dirty_log_type _disk_type = {
	.name = "disk",
	.module = THIS_MODULE,
	.ctr = disk_ctr,
	.dtr = disk_dtr,
	.postsuspend = disk_flush,
	.resume = disk_resume,
	.get_region_size = core_get_region_size,
	.is_clean = core_is_clean,
	.in_sync = core_in_sync,
	.flush = disk_flush,
	.mark_region = core_mark_region,
	.clear_region = core_clear_region,
	.get_resync_work = core_get_resync_work,
	.set_region_sync = core_set_region_sync,
	.get_sync_count = core_get_sync_count,
	.status = disk_status,
};
/*
 * Register the built-in "core" and "disk" log types.  Note only the
 * second registration's result is returned: a core failure is logged
 * but otherwise ignored if the disk type registers cleanly.
 */
static int __init dm_dirty_log_init(void)
{
	int r;

	r = dm_dirty_log_type_register(&_core_type);
	if (r)
		DMWARN("couldn't register core log");

	r = dm_dirty_log_type_register(&_disk_type);
	if (r) {
		DMWARN("couldn't register disk type");
		dm_dirty_log_type_unregister(&_core_type);
	}

	return r;
}
static void __exit dm_dirty_log_exit(void)
{
	/* Unregister in reverse order of registration. */
	dm_dirty_log_type_unregister(&_disk_type);
	dm_dirty_log_type_unregister(&_core_type);
}
  737. module_init(dm_dirty_log_init);
  738. module_exit(dm_dirty_log_exit);
  739. MODULE_DESCRIPTION(DM_NAME " dirty region log");
  740. MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <[email protected]>");
  741. MODULE_LICENSE("GPL");