blk-sec-stat-traffic.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Samsung Block Statistics
 *
 * Copyright (C) 2021 Manjong Lee <[email protected]>
 * Copyright (C) 2021 Junho Kim <[email protected]>
 * Copyright (C) 2021 Changheun Lee <[email protected]>
 * Copyright (C) 2021 Seunghwan Hyun <[email protected]>
 * Copyright (C) 2021 Tran Xuan Nam <[email protected]>
 */

#include <linux/sysfs.h>
#include <linux/blk_types.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/cpufreq.h>
#include <linux/log2.h>
#include <linux/pm_qos.h>

#include "blk-sec.h"

struct traffic {
	u64 transferred_bytes;
	int level;
	unsigned int timestamp;
	struct work_struct update_work;
	struct delayed_work notify_work;
};

static DEFINE_PER_CPU(u64, transferred_bytes);
static DEFINE_PER_CPU(struct freq_qos_request, cpufreq_req);
static struct pm_qos_request cpu_pm_req;

static unsigned int interval_ms = 1000;
static unsigned int interval_bytes = 100 * 1024 * 1024;

static struct traffic traffic;

#define TL0_UEVENT_DELAY_MS 2000

#define UPDATE_WORK_TO_TRAFFIC(work) \
	container_of(work, struct traffic, update_work)
#define NOTIFY_WORK_TO_TRAFFIC(work) \
	container_of(to_delayed_work(work), struct traffic, notify_work)

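/*
 * Sum the per-CPU byte counters into a single total. This is a plain,
 * unsynchronized snapshot: updates happening concurrently on other CPUs
 * may be missed until the next read.
 */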
static u64 get_transferred_bytes(void)
{
	u64 bytes = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		bytes += per_cpu(transferred_bytes, cpu);

	return bytes;
}

/*
 * Convert throughput (MB/s) to a traffic level:
 *   level 0:   0 <= tput < 100 MB/s
 *   level 1: 100 <= tput < 200 MB/s
 *   level 2: 200 <= tput < 400 MB/s
 *   level 3: 400 <= tput < 800 MB/s
 *   ...and so on, doubling the range per level.
 * e.g. 250 MB/s -> ilog2(250 / 100) + 1 = 2.
 */
static int tp2level(int tput)
{
	if (tput < 100)
		return 0;

	return (int)ilog2(tput / 100) + 1;
}

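/*
 * Report the current traffic level to user space as a KOBJ_CHANGE uevent
 * ("NAME=IO_TRAFFIC", "LEVEL=<n>") on the blk_sec_dev device provided by
 * blk-sec.h.
 */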
static void notify_traffic_level(struct traffic *traffic)
{
#define BUF_SIZE 16
	char buf[BUF_SIZE];
	char *envp[] = { "NAME=IO_TRAFFIC", buf, NULL, };
	int ret;

	if (unlikely(IS_ERR(blk_sec_dev)))
		return;

	memset(buf, 0, BUF_SIZE);
	snprintf(buf, BUF_SIZE, "LEVEL=%d", traffic->level);

	ret = kobject_uevent_env(&blk_sec_dev->kobj, KOBJ_CHANGE, envp);
	if (ret)
		pr_err("%s: couldn't send uevent (%d)\n", __func__, ret);
}

#define MB(x) ((x) / 1024 / 1024)

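/*
 * Worker that recomputes the traffic level: throughput is the amount
 * transferred since the previous update divided by the elapsed time.
 * A uevent is scheduled only when the level crosses the zero/non-zero
 * boundary; dropping back to level 0 is reported after a
 * TL0_UEVENT_DELAY_MS debounce delay.
 */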
static void update_traffic_level(struct work_struct *work)
{
	struct traffic *traffic = UPDATE_WORK_TO_TRAFFIC(work);
	struct traffic old = *traffic;
	unsigned int duration_ms;
	u64 amount;
	int tput;
	int delay = 0;

	traffic->transferred_bytes = get_transferred_bytes();
	traffic->timestamp = jiffies_to_msecs(jiffies);

	duration_ms = traffic->timestamp - old.timestamp;
	amount = traffic->transferred_bytes - old.transferred_bytes;
	tput = MB(amount) * 1000 / duration_ms;
	traffic->level = tp2level(tput);

	if (!!traffic->level == !!old.level)
		return;

	if (traffic->level == 0)	/* level !0 -> 0 */
		delay = msecs_to_jiffies(TL0_UEVENT_DELAY_MS);

	cancel_delayed_work_sync(&traffic->notify_work);
	schedule_delayed_work(&traffic->notify_work, delay);
}

static void send_uevent(struct work_struct *work)
{
	struct traffic *traffic = NOTIFY_WORK_TO_TRAFFIC(work);

	notify_traffic_level(traffic);
}

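/*
 * Account data_size bytes for the given request. Only READ and WRITE
 * requests are counted (anything with req_op(rq) > REQ_OP_WRITE is
 * ignored). The update worker is kicked at most once per interval_ms,
 * or sooner once interval_bytes of new data have been transferred.
 */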
void blk_sec_stat_traffic_update(struct request *rq, unsigned int data_size)
{
	unsigned int duration_ms;
	u64 amount;

	if (req_op(rq) > REQ_OP_WRITE)
		return;

	this_cpu_add(transferred_bytes, data_size);

	duration_ms = jiffies_to_msecs(jiffies) - traffic.timestamp;
	amount = get_transferred_bytes() - traffic.transferred_bytes;
	if ((duration_ms < interval_ms) && (amount < interval_bytes))
		return;

	schedule_work(&traffic.update_work);
}

static void init_traffic(struct traffic *traffic)
{
	traffic->transferred_bytes = 0;
	traffic->level = 0;
	traffic->timestamp = jiffies_to_msecs(jiffies);

	INIT_WORK(&traffic->update_work, update_traffic_level);
	INIT_DELAYED_WORK(&traffic->notify_work, send_uevent);
}

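/*
 * enable == false requests a 0 us CPU wakeup-latency limit via the CPU
 * latency QoS framework, which keeps CPUs out of deep idle (low-power)
 * states; enable == true restores PM_QOS_DEFAULT_VALUE, i.e. no
 * constraint.
 */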
static void allow_cpu_lpm(bool enable)
{
	if (enable)
		cpu_latency_qos_update_request(&cpu_pm_req, PM_QOS_DEFAULT_VALUE);
	else
		cpu_latency_qos_update_request(&cpu_pm_req, 0);
}

static ssize_t transferred_bytes_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%llu\n", get_transferred_bytes());
}

static ssize_t cpufreq_min_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct freq_qos_request *req;
	int len = 0;
	int i;

	for_each_possible_cpu(i) {
		req = &per_cpu(cpufreq_req, i);
		if (IS_ERR_OR_NULL(req->qos))
			continue;

		len += scnprintf(buf + len, PAGE_SIZE - len, "%d: %d, %d, %d\n",
				i,
				req->qos->min_freq.target_value,
				req->qos->min_freq.default_value,
				req->qos->min_freq.no_constraint_value);
	}

	return len;
}

static ssize_t cpufreq_min_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct freq_qos_request *req;
	struct cpufreq_policy *policy;
	s32 cpufreq_min;
	int i;
	int ret;

	ret = kstrtoint(buf, 10, &cpufreq_min);
	if (ret)
		return ret;

	for_each_possible_cpu(i) {
		req = &per_cpu(cpufreq_req, i);
		if (IS_ERR_OR_NULL(req->qos)) {
			policy = cpufreq_cpu_get(i);
			if (!policy)
				continue;

			freq_qos_add_request(&policy->constraints,
					req, FREQ_QOS_MIN, cpufreq_min);
			cpufreq_cpu_put(policy);
		}
		freq_qos_update_request(req, cpufreq_min);
	}

	return count;
}

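/*
 * Example usage of the cpufreq_min attribute (paths and values are
 * illustrative; the actual location depends on the kobject passed to
 * blk_sec_stat_traffic_init(), and cpufreq frequencies are in kHz):
 *
 *   # request a 1.2 GHz frequency floor on every CPU's policy
 *   echo 1200000 > /sys/.../cpufreq_min
 *
 *   # print "<cpu>: <target>, <default>, <no_constraint>" per CPU
 *   cat /sys/.../cpufreq_min
 *
 * A FREQ_QOS_MIN request is added lazily for each CPU on the first write
 * and updated in place on subsequent writes.
 */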
static ssize_t cpu_lpm_enabled_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	if (IS_ERR_OR_NULL(cpu_pm_req.qos))
		return 0;

	return scnprintf(buf, PAGE_SIZE, "%d: %d, %d, %d\n",
			!!cpu_pm_req.qos->target_value,
			cpu_pm_req.qos->target_value,
			cpu_pm_req.qos->default_value,
			cpu_pm_req.qos->no_constraint_value);
}

static ssize_t cpu_lpm_enabled_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int enable;
	int ret;

	ret = kstrtoint(buf, 10, &enable);
	if (ret)
		return ret;

	allow_cpu_lpm(!!enable);

	return count;
}

static struct kobj_attribute transferred_bytes_attr =
	__ATTR(transferred_bytes, 0444, transferred_bytes_show, NULL);
static struct kobj_attribute cpufreq_min_attr =
	__ATTR(cpufreq_min, 0600, cpufreq_min_show, cpufreq_min_store);
static struct kobj_attribute cpu_lpm_enable_attr =
	__ATTR(cpu_lpm_enable, 0600, cpu_lpm_enabled_show, cpu_lpm_enabled_store);

static const struct attribute *blk_sec_stat_traffic_attrs[] = {
	&transferred_bytes_attr.attr,
	&cpufreq_min_attr.attr,
	&cpu_lpm_enable_attr.attr,
	NULL,
};

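/*
 * blk_sec_stat_traffic_init() publishes the attributes above under the
 * caller's kobject and registers the CPU latency QoS request with no
 * constraint; blk_sec_stat_traffic_exit() re-allows CPU low-power modes,
 * drops the QoS request, cancels any pending notify work and removes the
 * sysfs files.
 */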
int blk_sec_stat_traffic_init(struct kobject *kobj)
{
	if (!kobj)
		return -EINVAL;

	init_traffic(&traffic);
	cpu_latency_qos_add_request(&cpu_pm_req, PM_QOS_DEFAULT_VALUE);

	return sysfs_create_files(kobj, blk_sec_stat_traffic_attrs);
}

void blk_sec_stat_traffic_exit(struct kobject *kobj)
{
	if (!kobj)
		return;

	allow_cpu_lpm(true);
	cpu_latency_qos_remove_request(&cpu_pm_req);
	cancel_delayed_work_sync(&traffic.notify_work);

	sysfs_remove_files(kobj, blk_sec_stat_traffic_attrs);
}

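/*
 * Expected usage (a sketch inferred from the signatures above, not taken
 * from this file): the surrounding blk-sec code calls
 * blk_sec_stat_traffic_init() with the kobject it owns, calls
 * blk_sec_stat_traffic_update(rq, bytes) from its request accounting path,
 * and calls blk_sec_stat_traffic_exit() on teardown. The prototypes are
 * assumed to be declared in blk-sec.h.
 */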