blk-mq-sched.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "elevator.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
                            unsigned int nr_segs, struct request **merged_request);
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                            unsigned int nr_segs);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
                                   struct list_head *free);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async);
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_ctx *ctx,
                                  struct list_head *list, bool run_queue_async);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_rqs(struct request_queue *q);
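
/*
 * Re-run the hardware queue only if a scheduler restart has been marked;
 * the test_bit() keeps the common (no restart pending) case cheap.
 */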
static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                __blk_mq_sched_restart(hctx);
}
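
/* A bio may be merged unless it carries any of the no-merge flags. */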
static inline bool bio_mergeable(struct bio *bio)
{
        return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
}
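
/*
 * Give the elevator a chance to veto merging @bio into @rq; if the request
 * did not go through an elevator, or the elevator has no ->allow_merge()
 * hook, merging is allowed.
 */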
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
                         struct bio *bio)
{
        if (rq->rq_flags & RQF_ELV) {
                struct elevator_queue *e = q->elevator;

                if (e->type->ops.allow_merge)
                        return e->type->ops.allow_merge(q, rq, bio);
        }

        return true;
}
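
/* Notify the elevator that @rq has completed, passing the completion time. */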
static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
        if (rq->rq_flags & RQF_ELV) {
                struct elevator_queue *e = rq->q->elevator;

                if (e->type->ops.completed_request)
                        e->type->ops.completed_request(rq, now);
        }
}
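
/*
 * Notify the elevator that @rq is being requeued; only requests carrying
 * elevator-private data (RQF_ELVPRIV) are passed to ->requeue_request().
 */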
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
        if (rq->rq_flags & RQF_ELV) {
                struct request_queue *q = rq->q;
                struct elevator_queue *e = q->elevator;

                if ((rq->rq_flags & RQF_ELVPRIV) && e->type->ops.requeue_request)
                        e->type->ops.requeue_request(rq);
        }
}
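
/* Ask the elevator whether it still has requests queued for this hctx. */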
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.has_work)
                return e->type->ops.has_work(hctx);

        return false;
}
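
/* True if a scheduler restart has been marked for this hardware queue. */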
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif