rtrs-clt-stats.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include "rtrs-clt.h"
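
/*
 * Called per work completion: if the completion was processed on a CPU
 * other than the one the connection is bound to, count the migration on
 * both the current CPU ("to") and the connection's CPU ("from").
 */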
void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
{
	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
	struct rtrs_clt_stats *stats = clt_path->stats;
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	cpu = raw_smp_processor_id();
	s = get_cpu_ptr(stats->pcpu_stats);
	if (con->cpu != cpu) {
		s->cpu_migr.to++;

		/* Careful here, override s pointer */
		s = per_cpu_ptr(stats->pcpu_stats, con->cpu);

		atomic_inc(&s->cpu_migr.from);
	}
	put_cpu_ptr(stats->pcpu_stats);
}
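
/* Count one request failover on the current CPU. */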
void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
{
	this_cpu_inc(stats->pcpu_stats->rdma.failover_cnt);
}
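
/*
 * sysfs show helper: emit the per-CPU "migrated from" counters as a
 * space-separated list, one value per possible CPU.
 */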
int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	struct rtrs_clt_stats_pcpu *s;
	size_t used;
	int cpu;

	used = 0;
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += sysfs_emit_at(buf, used, "%d ",
				      atomic_read(&s->cpu_migr.from));
	}
	used += sysfs_emit_at(buf, used, "\n");

	return used;
}
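
/*
 * sysfs show helper: emit the per-CPU "migrated to" counters as a
 * space-separated list, one value per possible CPU.
 */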
int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	struct rtrs_clt_stats_pcpu *s;
	size_t used;
	int cpu;

	used = 0;
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += sysfs_emit_at(buf, used, "%d ", s->cpu_migr.to);
	}
	used += sysfs_emit_at(buf, used, "\n");

	return used;
}
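
/* sysfs show helper: emit successful and failed reconnect counts. */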
int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	return sysfs_emit(buf, "%d %d\n", stats->reconnects.successful_cnt,
			  stats->reconnects.fail_cnt);
}
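
/*
 * sysfs show helper: sum the per-CPU RDMA counters and emit read count,
 * read bytes, write count, write bytes, inflight requests and failovers.
 */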
ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, char *page)
{
	struct rtrs_clt_stats_rdma sum;
	struct rtrs_clt_stats_rdma *r;
	int cpu;

	memset(&sum, 0, sizeof(sum));

	for_each_possible_cpu(cpu) {
		r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;

		sum.dir[READ].cnt += r->dir[READ].cnt;
		sum.dir[READ].size_total += r->dir[READ].size_total;
		sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
		sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
		sum.failover_cnt += r->failover_cnt;
	}

	return sysfs_emit(page, "%llu %llu %llu %llu %u %llu\n",
			  sum.dir[READ].cnt, sum.dir[READ].size_total,
			  sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
			  atomic_read(&stats->inflight), sum.failover_cnt);
}
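
/* sysfs show helper: usage hint for the "reset all" attribute. */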
ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s, char *page)
{
	return sysfs_emit(page, "echo 1 to reset all statistics\n");
}
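
/* Zero the per-CPU RDMA counters; only enable == true is accepted. */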
int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->rdma, 0, sizeof(s->rdma));
	}

	return 0;
}
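
/* Zero the per-CPU migration counters; only enable == true is accepted. */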
int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
	}

	return 0;
}
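
/* Zero the reconnect counters; only enable == true is accepted. */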
int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
{
	if (!enable)
		return -EINVAL;

	memset(&stats->reconnects, 0, sizeof(stats->reconnects));

	return 0;
}
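
/* Reset all statistic groups and the inflight counter in one call. */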
int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
{
	if (enable) {
		rtrs_clt_reset_rdma_stats(s, enable);
		rtrs_clt_reset_cpu_migr_stats(s, enable);
		rtrs_clt_reset_reconnects_stat(s, enable);
		atomic_set(&s->inflight, 0);
		return 0;
	}

	return -EINVAL;
}
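
/* Account one request of @size bytes in direction @d on the current CPU. */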
static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
					      size_t size, int d)
{
	this_cpu_inc(stats->pcpu_stats->rdma.dir[d].cnt);
	this_cpu_add(stats->pcpu_stats->rdma.dir[d].size_total, size);
}
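
/*
 * Per-request accounting: add the request's user and data lengths to the
 * RDMA counters and, under the min-inflight multipath policy, bump the
 * inflight counter.
 */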
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
{
	struct rtrs_clt_con *con = req->con;
	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
	struct rtrs_clt_stats *stats = clt_path->stats;
	unsigned int len;

	len = req->usr_len + req->data_len;
	rtrs_clt_update_rdma_stats(stats, len, dir);
	if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
		atomic_inc(&stats->inflight);
}
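
/* Allocate the per-CPU statistics and initialize the reconnect counter. */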
int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
{
	stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
	if (!stats->pcpu_stats)
		return -ENOMEM;

	/*
	 * successful_cnt will be set to 0 after session
	 * is established for the first time
	 */
	stats->reconnects.successful_cnt = -1;

	return 0;
}