blk-mq-debugfs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */
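/*
 * blk-mq debugfs support: exports the internal state of request queues,
 * hardware queue contexts and software queue contexts for debugging and
 * testing, typically under /sys/kernel/debug/block/<disk>/.
 */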
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	if (!q->poll_stat)
		return 0;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};
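/*
 * Print the names of the bits that are set in @flags, separated by '|'.
 * Bits without an entry in @flag_name fall back to their numeric position.
 */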
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed when the queue is removed. Don't
	 * allow setting the state on a dying queue to avoid a use-after-free.
	 */
	if (blk_queue_dying(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}
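/*
 * Each attribute either supplies a .show (and optionally .write) callback for
 * single-shot output, or a .seq_ops table for iterating over a list of
 * records (e.g. the requeue list); see blk_mq_debugfs_open() below.
 */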
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME
static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
	RQF_NAME(TIMED_OUT),
	RQF_NAME(ELV),
	RQF_NAME(RESV),
};
#undef RQF_NAME
static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const enum req_op op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
		       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);
	return 0;
}
static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}
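/*
 * The helpers below take q->sysfs_lock so that the hctx->tags and
 * hctx->sched_tags pointers being dumped cannot be torn down underneath them
 * (for example across an elevator switch).
 */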
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}
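/*
 * Generate the seq_file operations for walking one per-CPU software context
 * request list (default, read or poll) while holding ctx->lock.
 */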
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
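/*
 * debugfs_create_files() stores the object a directory describes (request
 * queue, hctx, ctx or rq_qos) in the directory inode's i_private, while each
 * attribute file's inode holds its blk_mq_debugfs_attr. The callbacks below
 * recover both from the dentry hierarchy.
 */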
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}
void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent directory has not been created yet, return; we will be
	 * called again later on and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	lockdep_assert_held(&q->debugfs_mutex);

	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}
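/*
 * Each rq_qos policy that provides debugfs attributes gets its own directory,
 * named after the policy (e.g. "wbt"), under the queue's "rqos" directory.
 */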
void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	lockdep_assert_held(&rqos->q->debugfs_mutex);

	if (!rqos->q->debugfs_dir)
		return;
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	lockdep_assert_held(&q->debugfs_mutex);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later on with the appropriate parent debugfs
	 * directory from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	lockdep_assert_held(&hctx->queue->debugfs_mutex);

	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}