/* blk-mq-sysfs.c — sysfs interface for blk-mq hardware/software contexts */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/kernel.h>
  3. #include <linux/module.h>
  4. #include <linux/backing-dev.h>
  5. #include <linux/bio.h>
  6. #include <linux/blkdev.h>
  7. #include <linux/mm.h>
  8. #include <linux/init.h>
  9. #include <linux/slab.h>
  10. #include <linux/workqueue.h>
  11. #include <linux/smp.h>
  12. #include <linux/blk-mq.h>
  13. #include "blk.h"
  14. #include "blk-mq.h"
  15. #include "blk-mq-tag.h"
  16. static void blk_mq_sysfs_release(struct kobject *kobj)
  17. {
  18. struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);
  19. free_percpu(ctxs->queue_ctx);
  20. kfree(ctxs);
  21. }
/*
 * Release handler for one per-cpu software-context kobject.  Each ctx
 * holds a reference on the parent blk_mq_ctxs container (taken in
 * blk_mq_sysfs_init()); dropping it here guarantees the container
 * outlives every ctx.
 */
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}
/*
 * Final teardown of a hardware context, run when its kobject refcount
 * reaches zero: free everything the hctx owns, the hctx itself last.
 */
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}
/*
 * Glue between a generic sysfs attribute and a blk-mq hardware context:
 * the show/store callbacks receive the hctx directly instead of a raw
 * kobject.  Either callback may be NULL (access then returns -EIO).
 */
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
  43. static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
  44. struct attribute *attr, char *page)
  45. {
  46. struct blk_mq_hw_ctx_sysfs_entry *entry;
  47. struct blk_mq_hw_ctx *hctx;
  48. struct request_queue *q;
  49. ssize_t res;
  50. entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
  51. hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
  52. q = hctx->queue;
  53. if (!entry->show)
  54. return -EIO;
  55. mutex_lock(&q->sysfs_lock);
  56. res = entry->show(hctx, page);
  57. mutex_unlock(&q->sysfs_lock);
  58. return res;
  59. }
  60. static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
  61. struct attribute *attr, const char *page,
  62. size_t length)
  63. {
  64. struct blk_mq_hw_ctx_sysfs_entry *entry;
  65. struct blk_mq_hw_ctx *hctx;
  66. struct request_queue *q;
  67. ssize_t res;
  68. entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
  69. hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
  70. q = hctx->queue;
  71. if (!entry->store)
  72. return -EIO;
  73. mutex_lock(&q->sysfs_lock);
  74. res = entry->store(hctx, page, length);
  75. mutex_unlock(&q->sysfs_lock);
  76. return res;
  77. }
  78. static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
  79. char *page)
  80. {
  81. return sprintf(page, "%u\n", hctx->tags->nr_tags);
  82. }
  83. static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
  84. char *page)
  85. {
  86. return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
  87. }
/*
 * "cpu_list": comma-separated list of CPUs mapped to this hctx.  The
 * list is truncated rather than overflowing the page when there are
 * very many CPUs; one byte is reserved so the trailing newline always
 * fits.
 */
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	/* Reserve one byte for the newline emitted after the loop. */
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		/* snprintf returns the length it wanted to write; if the
		 * entry did not fit, stop instead of overflowing. */
		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	/* The newline may use the byte reserved above (hence size + 1). */
	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}
/* Read-only attributes exposed under each hctx's sysfs directory. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

/* NULL-terminated list, wrapped into "default_hw_ctx_groups" below. */
static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
/* Generates default_hw_ctx_group and default_hw_ctx_groups. */
ATTRIBUTE_GROUPS(default_hw_ctx);
static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

/* kobject type of the queue-level "mq" directory. */
static struct kobj_type blk_mq_ktype = {
	.release	= blk_mq_sysfs_release,
};

/* kobject type of each per-cpu software-context ("cpuN") entry. */
static struct kobj_type blk_mq_ctx_ktype = {
	.release	= blk_mq_ctx_sysfs_release,
};

/* kobject type of each hardware-context directory, with attributes. */
static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_groups	= default_hw_ctx_groups,
	.release	= blk_mq_hw_sysfs_release,
};
/*
 * Remove one hardware context's sysfs directory together with its
 * per-cpu children.  A hctx with no mapped software contexts was never
 * registered, so there is nothing to delete.
 */
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	/* Children first, then the hctx directory itself. */
	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}
/*
 * Add the sysfs directory for one hardware context under the queue's
 * "mq" kobject, plus a "cpuN" child for every mapped software context.
 * Returns 0 or a negative errno; on failure everything added here is
 * removed again before returning.
 */
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, j, ret;

	/* An unmapped hctx gets no directory; not an error. */
	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			goto out;
	}

	return 0;
out:
	/* Unwind only the ctx kobjects added before the failure (j < i). */
	hctx_for_each_ctx(hctx, ctx, j) {
		if (j < i)
			kobject_del(&ctx->kobj);
	}
	kobject_del(&hctx->kobj);
	return ret;
}
/* One-time kobject setup for a hardware context; added to sysfs later. */
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}
  178. void blk_mq_sysfs_deinit(struct request_queue *q)
  179. {
  180. struct blk_mq_ctx *ctx;
  181. int cpu;
  182. for_each_possible_cpu(cpu) {
  183. ctx = per_cpu_ptr(q->queue_ctx, cpu);
  184. kobject_put(&ctx->kobj);
  185. }
  186. kobject_put(q->mq_kobj);
  187. }
/*
 * Initialize (without adding to sysfs) the queue's "mq" kobject and one
 * kobject per possible-cpu software context.  Every ctx pins the parent
 * container with an extra reference so it cannot be released first.
 */
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		/* Reference dropped again in blk_mq_ctx_sysfs_release(). */
		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}
/*
 * Register the queue's "mq" sysfs directory and a subdirectory for each
 * hardware context.  Returns 0 on success or a negative errno; on
 * failure everything added here is torn down again.  The caller must
 * hold q->sysfs_dir_lock.
 */
int blk_mq_sysfs_register(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i, j;
	int ret;

	lockdep_assert_held(&q->sysfs_dir_lock);

	ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	/* Lets the later hctx (un)register helpers know sysfs is live. */
	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	/* Unwind only the hctxs registered before the failure (j < i). */
	queue_for_each_hw_ctx(q, hctx, j) {
		if (j < i)
			blk_mq_unregister_hctx(hctx);
	}

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	return ret;
}
  227. void blk_mq_sysfs_unregister(struct gendisk *disk)
  228. {
  229. struct request_queue *q = disk->queue;
  230. struct blk_mq_hw_ctx *hctx;
  231. unsigned long i;
  232. lockdep_assert_held(&q->sysfs_dir_lock);
  233. queue_for_each_hw_ctx(q, hctx, i)
  234. blk_mq_unregister_hctx(hctx);
  235. kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
  236. kobject_del(q->mq_kobj);
  237. q->mq_sysfs_init_done = false;
  238. }
  239. void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
  240. {
  241. struct blk_mq_hw_ctx *hctx;
  242. unsigned long i;
  243. mutex_lock(&q->sysfs_dir_lock);
  244. if (!q->mq_sysfs_init_done)
  245. goto unlock;
  246. queue_for_each_hw_ctx(q, hctx, i)
  247. blk_mq_unregister_hctx(hctx);
  248. unlock:
  249. mutex_unlock(&q->sysfs_dir_lock);
  250. }
  251. int blk_mq_sysfs_register_hctxs(struct request_queue *q)
  252. {
  253. struct blk_mq_hw_ctx *hctx;
  254. unsigned long i;
  255. int ret = 0;
  256. mutex_lock(&q->sysfs_dir_lock);
  257. if (!q->mq_sysfs_init_done)
  258. goto unlock;
  259. queue_for_each_hw_ctx(q, hctx, i) {
  260. ret = blk_mq_register_hctx(hctx);
  261. if (ret)
  262. break;
  263. }
  264. unlock:
  265. mutex_unlock(&q->sysfs_dir_lock);
  266. return ret;
  267. }