// SPDX-License-Identifier: GPL-2.0
/*
 * Statistics of SamSung Generic I/O scheduler
 *
 * Copyright (C) 2021 Changheun Lee <[email protected]>
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sbitmap.h>

#include "elevator.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "ssg.h"

#define IO_TYPES	(REQ_OP_DISCARD + 1)
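
/*
 * Request size (bytes) and completion latency (nsec) are binned against
 * the tables below; the sentinel entries at the end catch all larger values.
 */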
static unsigned int byte_table[] = {
	4096,		// 4KB
	32768,		// 32KB
	65536,		// 64KB
	131072,		// 128KB
	524288,		// 512KB
	1048576,	// 1MB

	UINT_MAX	// should be last in this array
};
#define BYTE_TABLE_SIZE	(sizeof(byte_table)/sizeof(unsigned int))

static u64 nsec_table[] = {
	500000,		// 0.5ms
	1000000,	// 1ms
	2000000,	// 2ms
	3000000,	// 3ms
	4000000,	// 4ms
	5000000,	// 5ms
	10000000,	// 10ms
	20000000,	// 20ms

	ULLONG_MAX	// should be last in this array
};
#define NSEC_TABLE_SIZE	(sizeof(nsec_table)/sizeof(u64))

struct ssg_stats {
	u64 io_latency_cnt[IO_TYPES][BYTE_TABLE_SIZE][NSEC_TABLE_SIZE];
};

struct ssg_bt_tags_iter_data {
	struct blk_mq_tags *tags;
	void *data;
	bool reserved;
};

typedef bool (ssg_tag_iter_fn)(struct sbitmap *, unsigned int, void *);
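
/*
 * Map a value to its histogram bucket: the first table entry that is
 * greater than or equal to the value. The UINT_MAX/ULLONG_MAX sentinels
 * guarantee a match.
 */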
static unsigned int byte_to_index(unsigned int byte)
{
	unsigned int idx;

	for (idx = 0; idx < BYTE_TABLE_SIZE; idx++)
		if (byte <= byte_table[idx])
			return idx;

	return BYTE_TABLE_SIZE - 1;
}

static unsigned int nsec_to_index(u64 nsec)
{
	unsigned int idx;

	for (idx = 0; idx < NSEC_TABLE_SIZE; idx++)
		if (nsec <= nsec_table[idx])
			return idx;

	return NSEC_TABLE_SIZE - 1;
}
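
/*
 * Bump the per-CPU latency counter for this request's op type, size bucket
 * and latency bucket. get_cpu_ptr() disables preemption while the counter
 * is updated.
 */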
static void update_io_latency(struct ssg_data *ssg, struct request *rq,
		unsigned int data_size, u64 now)
{
	struct ssg_stats *stats;
	int type = req_op(rq);
	int byte_idx, ns_idx;

	if (type > REQ_OP_DISCARD)
		return;

	if (rq->io_start_time_ns > now)
		return;

	byte_idx = byte_to_index(data_size);
	ns_idx = nsec_to_index(now - rq->io_start_time_ns);

	stats = get_cpu_ptr(ssg->stats);
	stats->io_latency_cnt[type][byte_idx][ns_idx]++;
	put_cpu_ptr(stats);
}

void ssg_stat_account_io_done(struct ssg_data *ssg, struct request *rq,
		unsigned int data_size, u64 now)
{
	if (unlikely(!ssg->stats))
		return;

	update_io_latency(ssg, rq, data_size, now);
}
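
/*
 * Sum the per-CPU counters for one op type and print one line per size
 * bucket: "<bucket in KB>:" followed by one count per latency bucket.
 */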
static int print_io_latency(struct ssg_stats __percpu *stats, int io_type,
		char *buf, int buf_size)
{
	u64 sum[BYTE_TABLE_SIZE][NSEC_TABLE_SIZE] = { 0, };
	int cpu;
	int len = 0;
	int byte_idx, ns_idx;

	for_each_possible_cpu(cpu) {
		struct ssg_stats *s = per_cpu_ptr(stats, cpu);

		for (byte_idx = 0; byte_idx < BYTE_TABLE_SIZE; byte_idx++)
			for (ns_idx = 0; ns_idx < NSEC_TABLE_SIZE; ns_idx++)
				sum[byte_idx][ns_idx] +=
					s->io_latency_cnt[io_type][byte_idx][ns_idx];
	}

	for (byte_idx = 0; byte_idx < BYTE_TABLE_SIZE; byte_idx++) {
		len += snprintf(buf + len, buf_size - len, "%u:",
				byte_table[byte_idx] / 1024);

		for (ns_idx = 0; ns_idx < NSEC_TABLE_SIZE; ns_idx++)
			len += snprintf(buf + len, buf_size - len, " %llu",
					sum[byte_idx][ns_idx]);

		len += snprintf(buf + len, buf_size - len, "\n");
	}

	return len;
}

#define IO_LATENCY_SHOW_FUNC(__FUNC, __IO_TYPE) \
ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
	struct ssg_data *ssg = e->elevator_data; \
	if (unlikely(!ssg->stats)) \
		return 0; \
	return print_io_latency(ssg->stats, \
			__IO_TYPE, page, PAGE_SIZE); \
}
IO_LATENCY_SHOW_FUNC(ssg_stat_read_latency_show, REQ_OP_READ);
IO_LATENCY_SHOW_FUNC(ssg_stat_write_latency_show, REQ_OP_WRITE);
IO_LATENCY_SHOW_FUNC(ssg_stat_flush_latency_show, REQ_OP_FLUSH);
IO_LATENCY_SHOW_FUNC(ssg_stat_discard_latency_show, REQ_OP_DISCARD);
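
/*
 * Walk every set bit in a tag map: reserved tags first (if any), then the
 * regular tags, calling @fn for each allocated tag.
 */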
static void ssg_all_tag_iter(struct blk_mq_tags *tags, ssg_tag_iter_fn *fn,
		struct ssg_bt_tags_iter_data *iter_data)
{
	iter_data->tags = tags;

	if (tags->nr_reserved_tags) {
		iter_data->reserved = true;
		sbitmap_for_each_set(&tags->breserved_tags.sb, fn, iter_data);
	}

	iter_data->reserved = false;
	sbitmap_for_each_set(&tags->bitmap_tags.sb, fn, iter_data);
}
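
/* Per-tag callback: count each in-flight request by its op type. */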
static bool ssg_count_inflight(struct sbitmap *bitmap, unsigned int bitnr,
		void *data)
{
	struct ssg_bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	unsigned int *inflight = iter_data->data;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	rq = tags->static_rqs[bitnr];
	if (!rq)
		return true;

	if (req_op(rq) < IO_TYPES)
		inflight[req_op(rq)]++;

	return true;
}
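
/*
 * Collect in-flight counts from the scheduler tags: the shared tag set when
 * tags are shared across hardware queues, otherwise each mapped hardware
 * queue.
 */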
static void get_ssg_inflight(struct request_queue *q, unsigned int *inflight)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	struct ssg_bt_tags_iter_data iter_data = {
		.data = inflight,
	};

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		ssg_all_tag_iter(q->sched_shared_tags, ssg_count_inflight, &iter_data);
	} else {
		queue_for_each_hw_ctx(q, hctx, i) {
			/*
			 * If no software queues are currently mapped to this
			 * hardware queue, there's nothing to check
			 */
			if (!blk_mq_hw_queue_mapped(hctx))
				continue;

			ssg_all_tag_iter(hctx->sched_tags, ssg_count_inflight, &iter_data);
		}
	}
}
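
/* Show in-flight request counts as "<reads> <writes> <discards>". */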
ssize_t ssg_stat_inflight_show(struct elevator_queue *e, char *page)
{
	struct ssg_data *ssg = e->elevator_data;
	unsigned int inflight[IO_TYPES] = {0, };

	if (unlikely(!ssg->stats))
		return 0;

	get_ssg_inflight(ssg->queue, inflight);

	return snprintf(page, PAGE_SIZE, "%u %u %u\n", inflight[REQ_OP_READ],
			inflight[REQ_OP_WRITE], inflight[REQ_OP_DISCARD]);
}
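
/*
 * Per-tag callback: append one line describing the in-flight request
 * (tags, op, flags, position, size, start time, MQ state) to the page.
 */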
static bool print_ssg_rq_info(struct sbitmap *bitmap, unsigned int bitnr,
		void *data)
{
	struct ssg_bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	char *page = iter_data->data;
	struct request *rq;
	int len = strlen(page);

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	rq = tags->static_rqs[bitnr];
	if (!rq)
		return true;

	scnprintf(page + len, PAGE_SIZE - len, "%d %d %x %x %llu %u %llu %d\n",
			rq->tag, rq->internal_tag, req_op(rq), rq->rq_flags,
			blk_rq_pos(rq), blk_rq_bytes(rq), rq->start_time_ns,
			rq->state);

	return true;
}

static void print_ssg_rqs(struct request_queue *q, char *page)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	struct ssg_bt_tags_iter_data iter_data = {
		.data = page,
	};

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		ssg_all_tag_iter(q->sched_shared_tags, print_ssg_rq_info, &iter_data);
	} else {
		queue_for_each_hw_ctx(q, hctx, i) {
			/*
			 * If no software queues are currently mapped to this
			 * hardware queue, there's nothing to check
			 */
			if (!blk_mq_hw_queue_mapped(hctx))
				continue;

			ssg_all_tag_iter(hctx->sched_tags, print_ssg_rq_info, &iter_data);
		}
	}
}

ssize_t ssg_stat_rqs_info_show(struct elevator_queue *e, char *page)
{
	struct ssg_data *ssg = e->elevator_data;

	if (unlikely(!ssg->stats))
		return 0;

	print_ssg_rqs(ssg->queue, page);

	return strlen(page);
}
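
/* Allocate/free the zero-initialized per-CPU statistics. */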
int ssg_stat_init(struct ssg_data *ssg)
{
	ssg->stats = alloc_percpu_gfp(struct ssg_stats,
			GFP_KERNEL | __GFP_ZERO);
	if (!ssg->stats)
		return -ENOMEM;

	return 0;
}

void ssg_stat_exit(struct ssg_data *ssg)
{
	if (ssg->stats)
		free_percpu(ssg->stats);
}