// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 *
 * RMNET Data Generic Netlink
 *
 */

#include "rmnet_genl.h"
#include <net/genetlink.h>
#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/math64.h>

#define RMNET_CORE_GENL_MAX_STR_LEN	255

/* Static Functions and Definitions */
static const struct nla_policy rmnet_genl_attr_policy[RMNET_CORE_GENL_ATTR_MAX + 1] = {
	[RMNET_CORE_GENL_ATTR_INT] = { .type = NLA_S32 },
	[RMNET_CORE_GENL_ATTR_PID_BPS] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_core_pid_bps_resp)),
	[RMNET_CORE_GENL_ATTR_PID_BOOST] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_core_pid_boost_req)),
	[RMNET_CORE_GENL_ATTR_STR] = { .type = NLA_NUL_STRING,
				       .len = RMNET_CORE_GENL_MAX_STR_LEN },
};
#define RMNET_CORE_GENL_OP(_cmd, _func) \
	{ \
		.cmd = _cmd, \
		.doit = _func, \
		.dumpit = NULL, \
		.flags = 0, \
	}

static const struct genl_ops rmnet_core_genl_ops[] = {
	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BPS_REQ,
			   rmnet_core_genl_pid_bps_req_hdlr),
	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BOOST_REQ,
			   rmnet_core_genl_pid_boost_req_hdlr),
};
struct genl_family rmnet_core_genl_family = {
	.hdrsize = 0,
	.name = RMNET_CORE_GENL_FAMILY_NAME,
	.version = RMNET_CORE_GENL_VERSION,
	.maxattr = RMNET_CORE_GENL_ATTR_MAX,
	.policy = rmnet_genl_attr_policy,
	.ops = rmnet_core_genl_ops,
	.n_ops = ARRAY_SIZE(rmnet_core_genl_ops),
};
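
/* Userspace usage sketch (illustrative only, not part of this driver):
 * a client resolves the family by name and sends
 * RMNET_CORE_GENL_CMD_PID_BPS_REQ once per query period. This assumes
 * libnl-genl-3; error handling is omitted and the variable names are
 * hypothetical. Note that rmnet_genl_attr_policy above validates
 * RMNET_CORE_GENL_ATTR_PID_BPS against the *response* struct size, so
 * the request attribute is sized to that length here.
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	struct nl_msg *msg = nlmsg_alloc();
 *	struct rmnet_core_pid_bps_resp buf = { 0 };
 *	int fam;
 *
 *	genl_connect(sk);
 *	fam = genl_ctrl_resolve(sk, RMNET_CORE_GENL_FAMILY_NAME);
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *		    RMNET_CORE_GENL_CMD_PID_BPS_REQ,
 *		    RMNET_CORE_GENL_VERSION);
 *	nla_put(msg, RMNET_CORE_GENL_ATTR_PID_BPS, sizeof(buf), &buf);
 *	nl_send_auto(sk, msg);
 */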

#define RMNET_PID_STATS_HT_SIZE	(8)
#define RMNET_PID_STATS_HT	rmnet_pid_ht
DEFINE_HASHTABLE(rmnet_pid_ht, RMNET_PID_STATS_HT_SIZE);

/* Spinlock definition for pid hash table */
static DEFINE_SPINLOCK(rmnet_pid_ht_splock);

#define RMNET_GENL_SEC_TO_MSEC(x)	((x) * 1000)
#define RMNET_GENL_SEC_TO_NSEC(x)	((x) * 1000000000)
#define RMNET_GENL_BYTES_TO_BITS(x)	((x) * 8)
#define RMNET_GENL_NSEC_TO_SEC(x) ({ \
	u64 __quotient = (x); \
	do_div(__quotient, 1000000000); \
	__quotient; \
})

int rmnet_core_userspace_connected;

#define RMNET_QUERY_PERIOD_SEC	(1) /* Period of pid/bps queries */

struct rmnet_pid_node_s {
	struct hlist_node list;
	ktime_t timestamp_last_query;
	u64 tx_bytes;
	u64 tx_bytes_last_query;
	u64 tx_bps;
	u64 sched_boost_period_ms;
	int sched_boost_remaining_ms;
	int sched_boost_enable;
	pid_t pid;
};
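
/* rmnet_update_pid_and_check_boost() - credit @len tx bytes to @pid and
 * report whether a scheduler boost should fire.
 *
 * If @pid is not yet tracked, a node is allocated (GFP_ATOMIC) and added
 * to rmnet_pid_ht. If the node has a boost armed by rmnet_boost_for_pid(),
 * the boost is consumed exactly once: *@boost_enable is set to 1 and
 * *@boost_period returns the boost window in milliseconds.
 */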
void rmnet_update_pid_and_check_boost(pid_t pid, unsigned int len,
				      int *boost_enable, u64 *boost_period)
{
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;
	u8 is_match_found = 0;
	u64 tx_bytes = 0;

	*boost_enable = 0;
	*boost_period = 0;

	/* Using do while to spin lock and unlock only once */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
					    list, pid) {
			if (pid != node_p->pid)
				continue;

			/* PID match found */
			is_match_found = 1;
			node_p->tx_bytes += len;
			tx_bytes = node_p->tx_bytes;

			if (node_p->sched_boost_enable) {
				rm_err("boost triggered for pid %d", pid);
				/* Just triggered boost, don't re-trigger */
				node_p->sched_boost_enable = 0;
				*boost_enable = 1;
				*boost_period = node_p->sched_boost_period_ms;
				node_p->sched_boost_remaining_ms =
							(int)*boost_period;
			}

			break;
		}

		if (is_match_found)
			break;

		/* No PID match, start tracking this pid */
		node_p = kzalloc(sizeof(*node_p), GFP_ATOMIC);
		if (!node_p)
			break;

		node_p->pid = pid;
		node_p->tx_bytes = len;
		node_p->sched_boost_enable = 0;
		node_p->sched_boost_period_ms = 0;
		node_p->sched_boost_remaining_ms = 0;
		hash_add_rcu(RMNET_PID_STATS_HT, &node_p->list, pid);
		break;
	} while (0);
	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
}
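
/* rmnet_boost_for_pid() - arm a one-shot scheduler boost for @pid.
 *
 * Records the requested boost state and period on the pid's node; the
 * boost itself fires the next time rmnet_update_pid_and_check_boost()
 * sees traffic from @pid. Pids not already in rmnet_pid_ht are ignored.
 */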
void rmnet_boost_for_pid(pid_t pid, int boost_enable,
			 u64 boost_period)
{
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;

	/* Using do while to spin lock and unlock only once */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
					    list, pid) {
			if (pid != node_p->pid)
				continue;

			/* PID match found */
			rm_err("CORE_BOOST: enable boost for pid %d for %llu ms",
			       pid, boost_period);
			node_p->sched_boost_enable = boost_enable;
			node_p->sched_boost_period_ms = boost_period;
			break;
		}

		break;
	} while (0);
	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
}
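
/* rmnet_create_pid_bps_resp() - fill @pid_bps_resp_ptr with per-pid tx
 * bitrates accumulated since the previous query.
 *
 * Walks the whole hash table under rmnet_pid_ht_splock. Pids with no new
 * tx bytes since the last query are considered inactive and are freed.
 * At most RMNET_CORE_GENL_MAX_PIDS entries are copied out; list_len is
 * set to the number actually written.
 */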
static void rmnet_create_pid_bps_resp(struct rmnet_core_pid_bps_resp
				      *pid_bps_resp_ptr)
{
	struct timespec64 time;
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;
	u64 tx_bytes_cur, byte_diff, time_diff_ns, time_diff_s, tmp_bits;
	int i;
	u16 bkt;

	ktime_get_real_ts64(&time);
	pid_bps_resp_ptr->timestamp = RMNET_GENL_SEC_TO_NSEC(time.tv_sec) +
				      time.tv_nsec;

	/* Using do while to spin lock and unlock only once */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		i = 0;
		hash_for_each_safe(RMNET_PID_STATS_HT, bkt, tmp,
				   node_p, list) {
			tx_bytes_cur = node_p->tx_bytes;
			if (tx_bytes_cur <= node_p->tx_bytes_last_query) {
				/* Don't send inactive pids to userspace */
				hash_del(&node_p->list);
				kfree(node_p);
				continue;
			}

			/* Compute bits per second. do_div() takes the
			 * numerator by reference, leaves the quotient in it
			 * and returns the remainder, so tmp_bits is used as
			 * scratch space.
			 */
			byte_diff = (node_p->tx_bytes -
				     node_p->tx_bytes_last_query);
			time_diff_ns = (pid_bps_resp_ptr->timestamp -
					node_p->timestamp_last_query);
			time_diff_s = RMNET_GENL_NSEC_TO_SEC(time_diff_ns);
			tmp_bits = RMNET_GENL_BYTES_TO_BITS(byte_diff);
			/* Guard against a sub-second re-query dividing by
			 * zero; treat the elapsed time as one second.
			 */
			if (time_diff_s)
				do_div(tmp_bits, time_diff_s);
			node_p->tx_bps = tmp_bits;

			if (node_p->sched_boost_remaining_ms >=
			    RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC)) {
				node_p->sched_boost_remaining_ms -=
					RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC);
				rm_err("CORE_BOOST: enabling boost for pid %d\n"
				       "sched boost remaining = %d ms",
				       node_p->pid,
				       node_p->sched_boost_remaining_ms);
			} else {
				node_p->sched_boost_remaining_ms = 0;
			}

			pid_bps_resp_ptr->list[i].pid = node_p->pid;
			pid_bps_resp_ptr->list[i].tx_bps = node_p->tx_bps;
			pid_bps_resp_ptr->list[i].boost_remaining_ms =
					node_p->sched_boost_remaining_ms;

			node_p->timestamp_last_query =
					pid_bps_resp_ptr->timestamp;
			node_p->tx_bytes_last_query = tx_bytes_cur;
			i++;

			/* Support copying up to 32 active pids */
			if (i >= RMNET_CORE_GENL_MAX_PIDS)
				break;
		}

		break;
	} while (0);
	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);

	pid_bps_resp_ptr->list_len = i;
}
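
/* rmnet_core_genl_send_resp() - unicast @pid_bps_resp to the userspace
 * requester identified by @info.
 *
 * On any failure, rmnet_core_userspace_connected is cleared so the driver
 * treats userspace as disconnected until the next request arrives.
 */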
int rmnet_core_genl_send_resp(struct genl_info *info,
			      struct rmnet_core_pid_bps_resp *pid_bps_resp)
{
	struct sk_buff *skb = NULL;
	void *msg_head;
	int rc;

	if (!info || !pid_bps_resp) {
		rm_err("%s", "SHS_GNL: Invalid params\n");
		goto out;
	}

	skb = genlmsg_new(sizeof(struct rmnet_core_pid_bps_resp), GFP_KERNEL);
	if (!skb)
		goto out;

	msg_head = genlmsg_put(skb, 0, info->snd_seq + 1,
			       &rmnet_core_genl_family,
			       0, RMNET_CORE_GENL_CMD_PID_BPS_REQ);
	if (!msg_head)
		goto out_free;

	rc = nla_put(skb, RMNET_CORE_GENL_ATTR_PID_BPS,
		     sizeof(struct rmnet_core_pid_bps_resp),
		     pid_bps_resp);
	if (rc != 0)
		goto out_free;

	genlmsg_end(skb, msg_head);

	rc = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
	if (rc != 0)
		goto out; /* genlmsg_unicast() consumes skb even on failure */

	rm_err("%s", "SHS_GNL: Successfully sent pid/bytes info\n");
	return RMNET_GENL_SUCCESS;

out_free:
	nlmsg_free(skb);
out:
	rm_err("%s", "SHS_GNL: FAILED to send pid/bytes info\n");
	rmnet_core_userspace_connected = 0;
	return RMNET_GENL_FAILURE;
}
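
/* rmnet_core_genl_pid_bps_req_hdlr() - handle a pid/bps query
 * (RMNET_CORE_GENL_CMD_PID_BPS_REQ) from userspace.
 *
 * Marks userspace as connected, snapshots per-pid bitrates and unicasts
 * the result. A malformed request still receives a response, just with
 * an empty pid list.
 */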
int rmnet_core_genl_pid_bps_req_hdlr(struct sk_buff *skb_2,
				     struct genl_info *info)
{
	struct nlattr *na;
	struct rmnet_core_pid_bps_req pid_bps_req;
	struct rmnet_core_pid_bps_resp pid_bps_resp;
	int is_req_valid = 0;

	rm_err("CORE_GNL: %s connected = %d", __func__,
	       rmnet_core_userspace_connected);

	if (!info) {
		rm_err("%s", "CORE_GNL: error - info is null");
	} else {
		na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BPS];
		if (na) {
			if (nla_memcpy(&pid_bps_req, na,
				       sizeof(pid_bps_req)) > 0) {
				is_req_valid = 1;
			} else {
				rm_err("CORE_GNL: nla_memcpy failed %d\n",
				       RMNET_CORE_GENL_ATTR_PID_BPS);
			}
		} else {
			rm_err("CORE_GNL: no info->attrs %d\n",
			       RMNET_CORE_GENL_ATTR_PID_BPS);
		}
	}

	if (!rmnet_core_userspace_connected)
		rmnet_core_userspace_connected = 1;

	/* Copy the pid/byte list into the payload */
	memset(&pid_bps_resp, 0x0, sizeof(pid_bps_resp));
	if (is_req_valid)
		rmnet_create_pid_bps_resp(&pid_bps_resp);
	pid_bps_resp.valid = 1;

	rmnet_core_genl_send_resp(info, &pid_bps_resp);

	return RMNET_GENL_SUCCESS;
}
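
/* rmnet_core_genl_pid_boost_req_hdlr() - handle a boost request
 * (RMNET_CORE_GENL_CMD_PID_BOOST_REQ) from userspace.
 *
 * Arms a one-shot boost for every entry in the request with
 * boost_enabled set, capped at RMNET_CORE_GENL_MAX_PIDS entries.
 */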
int rmnet_core_genl_pid_boost_req_hdlr(struct sk_buff *skb_2,
				       struct genl_info *info)
{
	struct nlattr *na;
	struct rmnet_core_pid_boost_req pid_boost_req;
	u16 boost_pid_cnt = RMNET_CORE_GENL_MAX_PIDS;
	u16 i = 0;

	rm_err("CORE_GNL: %s", __func__);

	if (!info) {
		rm_err("%s", "CORE_GNL: error - info is null");
		return RMNET_GENL_FAILURE;
	}

	na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BOOST];
	if (!na) {
		rm_err("CORE_GNL: no info->attrs %d\n",
		       RMNET_CORE_GENL_ATTR_PID_BOOST);
		return RMNET_GENL_FAILURE;
	}

	if (nla_memcpy(&pid_boost_req, na, sizeof(pid_boost_req)) <= 0) {
		rm_err("CORE_GNL: nla_memcpy failed %d\n",
		       RMNET_CORE_GENL_ATTR_PID_BOOST);
		return RMNET_GENL_FAILURE;
	}

	if (pid_boost_req.list_len < RMNET_CORE_GENL_MAX_PIDS)
		boost_pid_cnt = pid_boost_req.list_len;

	if (!pid_boost_req.valid)
		boost_pid_cnt = 0;

	for (i = 0; i < boost_pid_cnt; i++) {
		if (pid_boost_req.list[i].boost_enabled) {
			rmnet_boost_for_pid(pid_boost_req.list[i].pid, 1,
					    pid_boost_req.list[i].boost_period);
		}
	}

	return RMNET_GENL_SUCCESS;
}

/* Register the rmnet core driver generic netlink family */
int rmnet_core_genl_init(void)
{
	int ret;

	ret = genl_register_family(&rmnet_core_genl_family);
	if (ret != 0) {
		rm_err("CORE_GNL: register family failed: %i", ret);
		return RMNET_GENL_FAILURE;
	}

	rm_err("CORE_GNL: successfully registered generic netlink family: %s",
	       RMNET_CORE_GENL_FAMILY_NAME);
	return RMNET_GENL_SUCCESS;
}

/* Unregister the generic netlink family */
int rmnet_core_genl_deinit(void)
{
	int ret;

	ret = genl_unregister_family(&rmnet_core_genl_family);
	if (ret != 0)
		rm_err("CORE_GNL: unregister family failed: %i\n", ret);

	return RMNET_GENL_SUCCESS;
}