/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/refcount.h>
#include <linux/android_kabi.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
	WB_start_all,		/* nr_pages == 0 (all) work pending */
};

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
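
/*
 * A minimal sketch of how the batch size is meant to be used with the
 * per-CPU counters above; the real helper (__add_wb_stat() in
 * backing-dev.h) looks roughly like this:
 *
 *	percpu_counter_add_batch(&wb->stat[WB_RECLAIMABLE], 1, WB_STAT_BATCH);
 */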

/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_VMSCAN,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more and the work is done by
	 * the emergency worker instead; however, this reason is visible to
	 * userland through tracepoints and we keep exposing exactly the
	 * same information, so the name no longer matches.
	 */
	WB_REASON_FORKER_THREAD,
	WB_REASON_FOREIGN_FLUSH,

	WB_REASON_MAX,
};

struct wb_completion {
	atomic_t		cnt;
	wait_queue_head_t	*waitq;
};

#define __WB_COMPLETION_INIT(_waitq)	\
	(struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) }

/*
 * If one wants to wait for one or more wb_writeback_works, each work's
 * ->done should be set to a wb_completion defined using the following
 * macro.  Once all work items are issued with wb_queue_work(), the caller
 * can wait for the completion of all using wb_wait_for_completion().  Work
 * items which are waited upon aren't freed automatically on completion.
 */
#define WB_COMPLETION_INIT(bdi)		__WB_COMPLETION_INIT(&(bdi)->wb_waitq)

#define DEFINE_WB_COMPLETION(cmpl, bdi)	\
	struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
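
/*
 * A minimal usage sketch of the pattern described above, assuming the
 * wb_queue_work() / wb_wait_for_completion() helpers from the writeback
 * code (fs/fs-writeback.c) and an already-prepared @work item:
 *
 *	DEFINE_WB_COMPLETION(done, bdi);
 *
 *	work->done = &done;
 *	wb_queue_work(wb, work);	(may be repeated for several works)
 *	wb_wait_for_completion(&done);	(waits until all of them finish)
 */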

/*
 * Each wb (bdi_writeback) can perform writeback operations and is
 * measured and throttled independently. Without cgroup writeback, each
 * bdi (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg. This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure from the IO layer up to the
 * tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg. As the corresponding blkcg for a memcg may
 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
 * is tested for blkcg after lookup and removed from the index on mismatch
 * so that a new wb for the combination can be created.
 *
 * Each bdi_writeback that is not embedded into the backing_dev_info must
 * hold a reference to the parent backing_dev_info. See cgwb_create() for
 * details.
 */
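
/*
 * A rough, illustrative sketch of the lookup rule above; the real code is
 * wb_get_lookup() / wb_get_create() in mm/backing-dev.c:
 *
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 *	if (wb && wb->blkcg_css != blkcg_css)
 *		wb = NULL;	(blkcg changed - create a fresh wb instead)
 */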

struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	atomic_t writeback_inodes;	/* number of inodes under writeback */
	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	unsigned long bw_time_stamp;	/* last time write bw was updated */
	unsigned long dirtied_stamp;	/* pages dirtied at bw_time_stamp */
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * All the bdi tasks' dirty rates are curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the
	 * latter (a simplified sketch follows this struct).
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;
	enum wb_reason start_all_reason;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */
	struct delayed_work bw_dwork;	/* work item used for bandwidth estimate */

	unsigned long dirty_sleep;	/* last wait */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */
	struct list_head b_attached;	/* attached inodes, protected by list_lock */
	struct list_head offline_node;	/* anchored at offline_cgwbs */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};
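
/*
 * Simplified sketch of how @dirty_ratelimit follows
 * @balanced_dirty_ratelimit in small steps; the actual logic lives in
 * wb_update_dirty_ratelimit() in mm/page-writeback.c and is considerably
 * more careful:
 *
 *	step = small fraction of |balanced_dirty_ratelimit - dirty_ratelimit|;
 *	if (balanced_dirty_ratelimit > dirty_ratelimit)
 *		dirty_ratelimit += step;
 *	else
 *		dirty_ratelimit -= step;
 */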

struct backing_dev_info {
	u64 id;
	struct rb_node rb_node;		/* keyed by ->id */
	struct list_head bdi_list;
	unsigned long ra_pages;		/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;		/* max allowed IO size */

	struct kref refcnt;		/* Reference counter for the structure */
	unsigned int capabilities;	/* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bandwidth of wbs with dirty inodes.  > 0 if there
	 * are any dirty wbs, which is depended upon by bdi_has_dirty_io().
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;	/* the root writeback info for this bdi */
	struct list_head wb_list;	/* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	char dev_name[64];
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
#endif

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

struct wb_lock_cookie {
	bool locked;
	unsigned long flags;
};
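
/*
 * A wb_lock_cookie is filled in by unlocked_inode_to_wb_begin() and passed
 * back to unlocked_inode_to_wb_end() (both in backing-dev.h). A minimal
 * usage sketch:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	(... access wb state for @inode ...)
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */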

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put_many - decrement a wb's refcount
 * @wb: bdi_writeback to put
 * @nr: number of references to put
 */
static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
	if (WARN_ON_ONCE(!wb->bdi)) {
		/*
		 * A driver bug might cause a file to be removed before bdi
		 * was initialized.
		 */
		return;
	}

	if (wb != &wb->bdi->wb)
		percpu_ref_put_many(&wb->refcnt, nr);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	wb_put_many(wb, 1);
}

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}
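
/*
 * A cgwb may be released while it is being looked up, so a caller that
 * finds a wb without already holding a reference typically pins it with
 * wb_tryget() under RCU. A hedged sketch of that pattern:
 *
 *	rcu_read_lock();
 *	wb = (lookup in bdi->cgwb_tree);
 *	if (wb && !wb_tryget(wb))
 *		wb = NULL;	(dying - treat as not found)
 *	rcu_read_unlock();
 *	if (wb) {
 *		(... use wb ...)
 *		wb_put(wb);
 *	}
 */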

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

#endif	/* __LINUX_BACKING_DEV_DEFS_H */