dm-core.h

/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
#include <linux/jump_label.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"

#define DM_RESERVED_MAX_IOS		1024

struct dm_io;

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
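	 * A usage sketch of this access pattern follows the struct definition.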
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	/*
	 * requeue work context is needed for cloning one new bio
	 * to represent the dm_io to be requeued, since each
	 * dm_io may point to the original bio from FS.
	 */
	struct work_struct requeue_work;
	struct dm_io *requeue_list;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned int internal_suspend_count;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct dm_md_mempools *mempools;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};
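
/*
 * Usage sketch for the 'map' member above: the SRCU-protected read-side
 * pattern its comment refers to.  This is a minimal illustration and assumes
 * dm_get_live_table(), dm_put_live_table() and dm_table_get_size() as
 * declared in include/linux/device-mapper.h:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *	sector_t size = map ? dm_table_get_size(map) : 0;
 *
 *	dm_put_live_table(md, srcu_idx);
 */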

/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9

void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

DECLARE_STATIC_KEY_FALSE(stats_enabled);
DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
DECLARE_STATIC_KEY_FALSE(zoned_enabled);
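
/*
 * Sketch of how these static keys are meant to be consumed: the check
 * compiles to a patched jump label, so the guarded slow path costs an
 * effectively free branch until the key is switched on with
 * static_branch_enable().  The helper name below is illustrative only;
 * <linux/jump_label.h> is already included above.
 *
 *	if (static_branch_unlikely(&stats_enabled))
 *		account_bio_to_dm_stats(md, bio);
 */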

static inline bool dm_emulate_zone_append(struct mapped_device *md)
{
	if (blk_queue_is_zoned(md->queue))
		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	return false;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};

static inline struct dm_target *dm_table_get_target(struct dm_table *t,
						    unsigned int index)
{
	BUG_ON(index >= t->num_targets);
	return t->targets + index;
}
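
/*
 * Typical caller pattern: iterate every target in a table.  A minimal
 * sketch using only what this header declares:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < t->num_targets; i++) {
 *		struct dm_target *ti = dm_table_get_target(t, i);
 *
 *		... operate on ti ...
 *	}
 */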

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;
	blk_short_t flags;
	unsigned int target_bio_nr;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	struct bio clone;
};

#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
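
/*
 * These offsets are used when sizing the bio_set front padding so that each
 * clone bio ends up embedded at the tail of a struct dm_target_io (and, for
 * the io_bs pool, at the tail of the enclosing struct dm_io).  A hedged
 * sketch of how the containing structures are then recovered from a clone:
 *
 *	struct dm_target_io *tio = container_of(clone, struct dm_target_io, clone);
 *	struct dm_io *io = tio->io;
 */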

/*
 * dm_target_io flags
 */
enum {
	DM_TIO_INSIDE_DM_IO,
	DM_TIO_IS_DUPLICATE_BIO
};

static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
{
	return (tio->flags & (1U << bit)) != 0;
}

static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
{
	tio->flags |= (1U << bit);
}

static inline bool dm_tio_is_normal(struct dm_target_io *tio)
{
	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
}
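
/*
 * Flag helper usage sketch; the flags word is a plain bitmask indexed by the
 * enum above:
 *
 *	dm_tio_set_flag(tio, DM_TIO_IS_DUPLICATE_BIO);
 *	if (!dm_tio_is_normal(tio))
 *		... handle the duplicate clone specially ...
 */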

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;
	blk_short_t flags;
	spinlock_t lock;
	unsigned long start_time;
	void *data;
	struct dm_io *next;
	struct dm_stats_aux stats_aux;
	blk_status_t status;
	atomic_t io_count;
	struct mapped_device *md;

	/* The three fields represent mapped part of original bio */
	struct bio *orig_bio;
	unsigned int sector_offset; /* offset to end of orig_bio */
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

/*
 * dm_io flags
 */
enum {
	DM_IO_ACCOUNTED,
	DM_IO_WAS_SPLIT
};

static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
{
	return (io->flags & (1U << bit)) != 0;
}

static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
{
	io->flags |= (1U << bit);
}

void dm_io_rewind(struct dm_io *io, struct bio_set *bs);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def,
				   unsigned int max);

static inline bool dm_message_test_buffer_overflow(char *result, unsigned int maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
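
/*
 * Usage sketch for the overflow check above, for code that formats status or
 * message text into a fixed-size caller-supplied buffer.  The formatting call
 * is illustrative only:
 *
 *	scnprintf(result, maxlen, "%u %u", used, total);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		... the output was truncated ...
 */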

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;

void dm_issue_global_event(void);

#endif