blk-rq-qos.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;
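
/*
 * Each QoS policy attached to a request_queue is identified by one of
 * these IDs; rq_qos_id() below walks the per-queue list looking for a
 * matching entry.
 */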
enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
};
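
/*
 * A wait queue paired with an in-flight counter.  Policies use this to
 * put submitters to sleep once their in-flight budget is used up; see
 * rq_qos_wait() and rq_wait_inc_below() declared below.
 */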
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};
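
/*
 * One attached policy instance.  All policies on a queue are kept on a
 * singly linked list headed at q->rq_qos and chained through ->next.
 */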
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};
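
/*
 * Per-policy callbacks, invoked through the rq_qos_*() wrappers and
 * __rq_qos_*() helpers below at the matching points in the bio/request
 * life cycle.  debugfs_attrs, if set, is registered when the policy is
 * added to the queue.
 */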
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};
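
/*
 * Queue-depth scaling state for policies that throttle by limiting the
 * number of outstanding requests; manipulated via rq_depth_scale_up(),
 * rq_depth_scale_down() and rq_depth_calc_max_depth() declared below.
 */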
struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};
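
/* Find the policy with the given id on @q, or NULL if it is not attached. */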
static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

static inline int rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	/*
	 * No IO can be in-flight when adding rqos, so freeze queue, which
	 * is fine since we only support rq_qos for blk-mq queue.
	 *
	 * Reuse ->queue_lock for protecting against other concurrent
	 * rq_qos adding/deleting.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	if (rq_qos_id(q, rqos->id))
		goto ebusy;
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	if (rqos->ops->debugfs_attrs) {
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_register_rqos(rqos);
		mutex_unlock(&q->debugfs_mutex);
	}

	return 0;
ebusy:
	spin_unlock_irq(&q->queue_lock);
	blk_mq_unfreeze_queue(q);
	return -EBUSY;
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	/*
	 * See comment in rq_qos_add() about freezing queue & using
	 * ->queue_lock.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_unregister_rqos(rqos);
	mutex_unlock(&q->debugfs_mutex);
}
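
/*
 * Typical attach flow (illustrative sketch only; the "foo" names are
 * hypothetical): a policy embeds a struct rq_qos, fills in ->q, ->id and
 * ->ops, then calls rq_qos_add(); rq_qos_del() detaches it again.
 *
 *	static struct rq_qos_ops foo_rqos_ops = {
 *		.throttle	= foo_throttle,
 *		.done		= foo_done,
 *		.exit		= foo_exit,
 *	};
 *
 *	foo->rqos.q = q;
 *	foo->rqos.id = RQ_QOS_WBT;
 *	foo->rqos.ops = &foo_rqos_ops;
 *	if (rq_qos_add(q, &foo->rqos))
 *		goto err_free;
 */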
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
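
/*
 * The rq_qos_*() wrappers below are the entry points used by the core
 * block layer; each is a no-op when no policy is attached
 * (q->rq_qos == NULL) and otherwise hands off to the corresponding
 * __rq_qos_*() helper.
 */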
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}
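
/*
 * Bio completion is only reported to the policies for bios that were
 * marked on the submission side: rq_qos_throttle() sets BIO_QOS_THROTTLED
 * and rq_qos_merge() sets BIO_QOS_MERGED, and rq_qos_done_bio() checks
 * those flags before calling into __rq_qos_done_bio().
 */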
static inline void rq_qos_done_bio(struct bio *bio)
{
	if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
			     bio_flagged(bio, BIO_QOS_MERGED))) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		if (q->rq_qos)
			__rq_qos_done_bio(q->rq_qos, bio);
	}
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_THROTTLED);
		__rq_qos_throttle(q->rq_qos, bio);
	}
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_MERGED);
		__rq_qos_merge(q->rq_qos, rq, bio);
	}
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}
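
/* Detach and tear down (via each policy's ->exit()) everything still attached to @q. */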
void rq_qos_exit(struct request_queue *);

#endif