ssg-cgroup.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Control Group of SamSung Generic I/O scheduler
 *
 * Copyright (C) 2021 Changheun Lee <[email protected]>
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

#include "blk-cgroup.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "ssg.h"

static struct blkcg_policy ssg_blkcg_policy;

#define CPD_TO_SSG_BLKCG(_cpd) \
	container_of_safe((_cpd), struct ssg_blkcg, cpd)
#define BLKCG_TO_SSG_BLKCG(_blkcg) \
	CPD_TO_SSG_BLKCG(blkcg_to_cpd((_blkcg), &ssg_blkcg_policy))

#define PD_TO_SSG_BLKG(_pd) \
	container_of_safe((_pd), struct ssg_blkg, pd)
#define BLKG_TO_SSG_BLKG(_blkg) \
	PD_TO_SSG_BLKG(blkg_to_pd((_blkg), &ssg_blkcg_policy))

#define CSS_TO_SSG_BLKCG(css) BLKCG_TO_SSG_BLKCG(css_to_blkcg(css))
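
/*
 * Per-cgroup policy data (cpd) lifecycle callbacks: allocated alongside the
 * blkcg, initialized with no limit (max_available_ratio = 100), and freed
 * together with it.
 */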
static struct blkcg_policy_data *ssg_blkcg_cpd_alloc(gfp_t gfp)
{
	struct ssg_blkcg *ssg_blkcg;

	ssg_blkcg = kzalloc(sizeof(struct ssg_blkcg), gfp);
	if (ZERO_OR_NULL_PTR(ssg_blkcg))
		return NULL;

	return &ssg_blkcg->cpd;
}

static void ssg_blkcg_cpd_init(struct blkcg_policy_data *cpd)
{
	struct ssg_blkcg *ssg_blkcg = CPD_TO_SSG_BLKCG(cpd);

	if (IS_ERR_OR_NULL(ssg_blkcg))
		return;

	ssg_blkcg->max_available_ratio = 100;
}

static void ssg_blkcg_cpd_free(struct blkcg_policy_data *cpd)
{
	struct ssg_blkcg *ssg_blkcg = CPD_TO_SSG_BLKCG(cpd);

	if (IS_ERR_OR_NULL(ssg_blkcg))
		return;

	kfree(ssg_blkcg);
}
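
/*
 * Derive the per-blkg request budget and sbitmap shallow depth from the
 * cgroup's max_available_ratio. Illustrative numbers only (not taken from
 * this source): with a sched tag depth of 256 spread over 4 sbitmap words
 * and a ratio of 50, max_available_rqs = 256 * 50 / 100 = 128 and
 * shallow_depth = max(1, 128 / 4) = 32 tags per word.
 */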
static void ssg_blkcg_set_shallow_depth(struct ssg_blkcg *ssg_blkcg,
		struct ssg_blkg *ssg_blkg, struct blk_mq_tags *tags)
{
	unsigned int depth = tags->bitmap_tags.sb.depth;
	unsigned int map_nr = tags->bitmap_tags.sb.map_nr;

	ssg_blkg->max_available_rqs =
		depth * ssg_blkcg->max_available_ratio / 100U;
	ssg_blkg->shallow_depth =
		max_t(unsigned int, 1, ssg_blkg->max_available_rqs / map_nr);
}

static struct blkg_policy_data *ssg_blkcg_pd_alloc(gfp_t gfp,
		struct request_queue *q, struct blkcg *blkcg)
{
	struct ssg_blkg *ssg_blkg;

	ssg_blkg = kzalloc_node(sizeof(struct ssg_blkg), gfp, q->node);
	if (ZERO_OR_NULL_PTR(ssg_blkg))
		return NULL;

	return &ssg_blkg->pd;
}

static void ssg_blkcg_pd_init(struct blkg_policy_data *pd)
{
	struct ssg_blkg *ssg_blkg;
	struct ssg_blkcg *ssg_blkcg;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	ssg_blkg = PD_TO_SSG_BLKG(pd);
	if (IS_ERR_OR_NULL(ssg_blkg))
		return;

	ssg_blkcg = BLKCG_TO_SSG_BLKCG(pd->blkg->blkcg);
	if (IS_ERR_OR_NULL(ssg_blkcg))
		return;

	atomic_set(&ssg_blkg->current_rqs, 0);
	queue_for_each_hw_ctx(pd->blkg->q, hctx, i)
		ssg_blkcg_set_shallow_depth(ssg_blkcg, ssg_blkg,
				hctx->sched_tags);
}

static void ssg_blkcg_pd_free(struct blkg_policy_data *pd)
{
	struct ssg_blkg *ssg_blkg = PD_TO_SSG_BLKG(pd);

	if (IS_ERR_OR_NULL(ssg_blkg))
		return;

	kfree(ssg_blkg);
}
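
/*
 * Shallow depth to apply for the current task's cgroup: 0 (no limit) while
 * the group is still under its request budget, otherwise the per-word depth
 * precomputed by ssg_blkcg_set_shallow_depth().
 */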
unsigned int ssg_blkcg_shallow_depth(struct request_queue *q)
{
	struct blkcg_gq *blkg;
	struct ssg_blkg *ssg_blkg;

	rcu_read_lock();
	blkg = blkg_lookup(css_to_blkcg(curr_css()), q);
	ssg_blkg = BLKG_TO_SSG_BLKG(blkg);
	rcu_read_unlock();

	if (IS_ERR_OR_NULL(ssg_blkg))
		return 0;

	if (atomic_read(&ssg_blkg->current_rqs) < ssg_blkg->max_available_rqs)
		return 0;

	return ssg_blkg->shallow_depth;
}
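
/*
 * Recompute every descendant blkg's budget and shallow depth when the
 * scheduler tag depth of a hardware queue changes; in-flight counters are
 * reset so the new budget takes effect immediately.
 */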
void ssg_blkcg_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	struct ssg_blkg *ssg_blkg;
	struct ssg_blkcg *ssg_blkcg;

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css, q->root_blkg) {
		ssg_blkg = BLKG_TO_SSG_BLKG(blkg);
		if (IS_ERR_OR_NULL(ssg_blkg))
			continue;

		ssg_blkcg = BLKCG_TO_SSG_BLKCG(blkg->blkcg);
		if (IS_ERR_OR_NULL(ssg_blkcg))
			continue;

		atomic_set(&ssg_blkg->current_rqs, 0);
		ssg_blkcg_set_shallow_depth(ssg_blkcg, ssg_blkg, hctx->sched_tags);
	}
	rcu_read_unlock();
}
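
/*
 * Per-blkg in-flight request accounting, consumed by
 * ssg_blkcg_shallow_depth() above.
 */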
void ssg_blkcg_inc_rq(struct blkcg_gq *blkg)
{
	struct ssg_blkg *ssg_blkg = BLKG_TO_SSG_BLKG(blkg);

	if (IS_ERR_OR_NULL(ssg_blkg))
		return;

	atomic_inc(&ssg_blkg->current_rqs);
}

void ssg_blkcg_dec_rq(struct blkcg_gq *blkg)
{
	struct ssg_blkg *ssg_blkg = BLKG_TO_SSG_BLKG(blkg);

	if (IS_ERR_OR_NULL(ssg_blkg))
		return;

	atomic_dec(&ssg_blkg->current_rqs);
}

static int ssg_blkcg_show_max_available_ratio(struct seq_file *sf, void *v)
{
	struct ssg_blkcg *ssg_blkcg = CSS_TO_SSG_BLKCG(seq_css(sf));

	if (IS_ERR_OR_NULL(ssg_blkcg))
		return -EINVAL;

	seq_printf(sf, "%d\n", ssg_blkcg->max_available_ratio);

	return 0;
}
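
/*
 * Write handler for ssg.max_available_ratio: accepts 0-100 and, under
 * blkcg->lock, pushes the resulting shallow depth to every blkg of this
 * blkcg on every hardware queue.
 */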
static int ssg_blkcg_set_max_available_ratio(struct cgroup_subsys_state *css,
		struct cftype *cftype, u64 ratio)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct ssg_blkcg *ssg_blkcg = CSS_TO_SSG_BLKCG(css);
	struct blkcg_gq *blkg;
	struct ssg_blkg *ssg_blkg;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	if (IS_ERR_OR_NULL(ssg_blkcg))
		return -EINVAL;

	if (ratio > 100)
		return -EINVAL;

	spin_lock_irq(&blkcg->lock);
	ssg_blkcg->max_available_ratio = ratio;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		ssg_blkg = BLKG_TO_SSG_BLKG(blkg);
		if (IS_ERR_OR_NULL(ssg_blkg))
			continue;

		queue_for_each_hw_ctx(blkg->q, hctx, i)
			ssg_blkcg_set_shallow_depth(ssg_blkcg, ssg_blkg,
					hctx->sched_tags);
	}
	spin_unlock_irq(&blkcg->lock);

	return 0;
}

struct cftype ssg_blkg_files[] = {
	{
		.name = "ssg.max_available_ratio",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = ssg_blkcg_show_max_available_ratio,
		.write_u64 = ssg_blkcg_set_max_available_ratio,
	},

	{} /* terminate */
};
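
/*
 * Illustrative usage from userspace (path and value are assumptions, not
 * taken from this source; the file above is registered via legacy_cftypes,
 * so it should appear in the cgroup v1 blkio hierarchy):
 *
 *	echo 70 > /sys/fs/cgroup/blkio/<group>/blkio.ssg.max_available_ratio
 *	cat /sys/fs/cgroup/blkio/<group>/blkio.ssg.max_available_ratio
 */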
static struct blkcg_policy ssg_blkcg_policy = {
	.legacy_cftypes = ssg_blkg_files,

	.cpd_alloc_fn = ssg_blkcg_cpd_alloc,
	.cpd_init_fn = ssg_blkcg_cpd_init,
	.cpd_free_fn = ssg_blkcg_cpd_free,

	.pd_alloc_fn = ssg_blkcg_pd_alloc,
	.pd_init_fn = ssg_blkcg_pd_init,
	.pd_free_fn = ssg_blkcg_pd_free,
};

int ssg_blkcg_activate(struct request_queue *q)
{
	return blkcg_activate_policy(q, &ssg_blkcg_policy);
}

void ssg_blkcg_deactivate(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &ssg_blkcg_policy);
}
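
/*
 * Register/unregister the blkcg policy; presumably called from the ssg
 * elevator's module init/exit path (assumption based on naming).
 */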
int ssg_blkcg_init(void)
{
	return blkcg_policy_register(&ssg_blkcg_policy);
}

void ssg_blkcg_exit(void)
{
	blkcg_policy_unregister(&ssg_blkcg_policy);
}