  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef SSG_H
  3. #define SSG_H
  4. #include "blk-cgroup.h"
/*
 * Per-request context tracked by the SSG scheduler for each request
 * (ssg_data.rq_info holds an array of these).
 */
struct ssg_request_info {
	pid_t tgid;		/* thread group id of the issuing task */
	sector_t sector;	/* request start sector */
	unsigned int data_size;	/* request data size -- presumably bytes; confirm against ssg-stat.c */
	struct blkcg_gq *blkg;	/* blkcg group this request belongs to -- see ssg_blkcg_inc_rq/dec_rq */
	void *pio;		/* opaque per-io context; NOTE(review): concrete type defined elsewhere */
};
/*
 * Per-queue scheduler state for the SSG elevator.
 *
 * NOTE(review): the [2]-sized arrays appear to be indexed by data
 * direction (read/write), deadline-scheduler style -- confirm against
 * the .c files that define the index constants.
 */
struct ssg_data {
	struct request_queue *queue;	/* owning request queue */

	/*
	 * requests are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];	/* sector-sorted trees, one per direction */
	struct list_head fifo_list[2];	/* arrival-ordered lists, one per direction */

	/*
	 * next in sort order. read, write or both are NULL
	 */
	struct request *next_rq[2];
	unsigned int starved_writes;	/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];		/* fifo expiry per direction */
	int max_write_starvation;	/* cap on starved_writes before writes are serviced */
	int front_merges;		/* whether front merging is attempted */

	/*
	 * to control request allocation
	 */
	atomic_t allocated_rqs;		/* total requests currently allocated */
	atomic_t async_write_rqs;	/* async write requests currently allocated */
	int congestion_threshold_rqs;
	int max_tgroup_rqs;
	int max_async_write_rqs;
	unsigned int tgroup_shallow_depth; /* thread group shallow depth for each tag map */
	unsigned int async_write_shallow_depth; /* async write shallow depth for each tag map */

	/*
	 * I/O context information for each request
	 */
	struct ssg_request_info *rq_info;

	/*
	 * Statistics (see ssg-stat.c; allocated by ssg_stat_init())
	 */
	void __percpu *stats;

	spinlock_t lock;		/* protects scheduler state */
	spinlock_t zone_lock;		/* NOTE(review): presumably for zoned-device ordering; confirm */
	struct list_head dispatch;	/* requests ready for dispatch */

	/*
	 * Write booster (see ssg-wb.c; managed by ssg_wb_init()/ssg_wb_exit())
	 */
	void *wb_data;
};
/*
 * Return the io-controller cgroup subsystem state of the current task.
 */
static inline struct cgroup_subsys_state *curr_css(void)
{
	return task_css(current, io_cgrp_id);
}
/* ssg-stat.c -- per-cpu I/O statistics (latency buckets, inflight, rq counters) */
extern int ssg_stat_init(struct ssg_data *ssg);	/* allocates ssg->stats; returns 0 or -errno -- confirm convention */
extern void ssg_stat_exit(struct ssg_data *ssg);
extern void ssg_stat_account_io_done(struct ssg_data *ssg,
		struct request *rq, unsigned int data_size, u64 now);
/* sysfs show handlers for the elevator attributes */
extern ssize_t ssg_stat_read_latency_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_stat_write_latency_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_stat_flush_latency_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_stat_discard_latency_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_stat_inflight_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_stat_rqs_info_show(struct elevator_queue *e, char *page);
/* ssg-cgroup.c -- per-cgroup request limiting via the blkcg policy framework */
#if IS_ENABLED(CONFIG_MQ_IOSCHED_SSG_CGROUP)
/*
 * Per-blkcg (cgroup-wide) policy data.  Embeds blkcg_policy_data first so
 * the containing structure can be recovered from the cpd pointer.
 */
struct ssg_blkcg {
	struct blkcg_policy_data cpd; /* must be the first member */
	int max_available_ratio;	/* NOTE(review): presumably % of queue rqs this cgroup may use; confirm */
};

/*
 * Per-(blkcg, queue) policy data.  Embeds blkg_policy_data first so the
 * containing structure can be recovered from the pd pointer.
 */
struct ssg_blkg {
	struct blkg_policy_data pd; /* must be the first member */
	atomic_t current_rqs;		/* requests currently held by this group */
	int max_available_rqs;		/* cap derived from max_available_ratio -- confirm in ssg-cgroup.c */
	unsigned int shallow_depth; /* shallow depth for each tag map to get sched tag */
};

extern int ssg_blkcg_init(void);
extern void ssg_blkcg_exit(void);
extern int ssg_blkcg_activate(struct request_queue *q);
extern void ssg_blkcg_deactivate(struct request_queue *q);
extern unsigned int ssg_blkcg_shallow_depth(struct request_queue *q);
extern void ssg_blkcg_depth_updated(struct blk_mq_hw_ctx *hctx);
extern void ssg_blkcg_inc_rq(struct blkcg_gq *blkg);
extern void ssg_blkcg_dec_rq(struct blkcg_gq *blkg);
#else
/*
 * CONFIG_MQ_IOSCHED_SSG_CGROUP disabled: no-op stubs so callers need no
 * #ifdefs.  ssg_blkcg_shallow_depth() returning 0 means "no per-cgroup
 * depth limit" -- confirm against the caller in the core scheduler.
 */
static inline int ssg_blkcg_init(void)
{
	return 0;
}

static inline void ssg_blkcg_exit(void)
{
}

static inline int ssg_blkcg_activate(struct request_queue *q)
{
	return 0;
}

static inline void ssg_blkcg_deactivate(struct request_queue *q)
{
}

static inline unsigned int ssg_blkcg_shallow_depth(struct request_queue *q)
{
	return 0;
}

static inline void ssg_blkcg_depth_updated(struct blk_mq_hw_ctx *hctx)
{
}

static inline void ssg_blkcg_inc_rq(struct blkcg_gq *blkg)
{
}

static inline void ssg_blkcg_dec_rq(struct blkcg_gq *blkg)
{
}
#endif
/* ssg-wb.c -- write booster control (state kept in ssg_data.wb_data) */
#if IS_ENABLED(CONFIG_MQ_IOSCHED_SSG_WB)
extern void ssg_wb_run_ctrl_work(struct ssg_data *ssg, struct request *rq);
extern void ssg_wb_depth_updated(struct blk_mq_hw_ctx *hctx);
extern void ssg_wb_init(struct ssg_data *ssg);
extern void ssg_wb_exit(struct ssg_data *ssg);
/* sysfs show/store handlers for write-booster on/off thresholds */
extern ssize_t ssg_wb_on_rqs_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_wb_on_rqs_store(struct elevator_queue *e, const char *page, size_t count);
extern ssize_t ssg_wb_off_rqs_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_wb_off_rqs_store(struct elevator_queue *e, const char *page, size_t count);
extern ssize_t ssg_wb_on_dirty_bytes_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_wb_on_dirty_bytes_store(struct elevator_queue *e, const char *page, size_t count);
extern ssize_t ssg_wb_off_dirty_bytes_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_wb_off_dirty_bytes_store(struct elevator_queue *e, const char *page, size_t count);
extern ssize_t ssg_wb_on_sync_write_bytes_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_wb_on_sync_write_bytes_store(struct elevator_queue *e, const char *page, size_t count);
extern ssize_t ssg_wb_off_sync_write_bytes_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_wb_off_sync_write_bytes_store(struct elevator_queue *e, const char *page, size_t count);
extern ssize_t ssg_wb_on_dirty_busy_written_bytes_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_wb_on_dirty_busy_written_bytes_store(struct elevator_queue *e, const char *page, size_t count);
extern ssize_t ssg_wb_on_dirty_busy_msecs_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_wb_on_dirty_busy_msecs_store(struct elevator_queue *e, const char *page, size_t count);
extern ssize_t ssg_wb_off_delay_msecs_show(struct elevator_queue *e, char *page);
extern ssize_t ssg_wb_off_delay_msecs_store(struct elevator_queue *e, const char *page, size_t count);
extern ssize_t ssg_wb_triggered_show(struct elevator_queue *e, char *page);
#else
/*
 * CONFIG_MQ_IOSCHED_SSG_WB disabled: no-op stubs so callers need no
 * #ifdefs (the sysfs handlers have no stubs; their attribute table is
 * presumably compiled out as well -- confirm in the core scheduler).
 */
static inline void ssg_wb_run_ctrl_work(struct ssg_data *ssg, struct request *rq)
{
}

static inline void ssg_wb_depth_updated(struct blk_mq_hw_ctx *hctx)
{
}

static inline void ssg_wb_init(struct ssg_data *ssg)
{
}

static inline void ssg_wb_exit(struct ssg_data *ssg)
{
}
#endif

#endif // SSG_H