rmnet_genl.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 *
 * RMNET Data Generic Netlink
 *
 */

#include "rmnet_genl.h"
#include <net/genetlink.h>
#include <net/sock.h>
#include <linux/hashtable.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>

#define RMNET_CORE_GENL_MAX_STR_LEN	255

/* Static Functions and Definitions */
static struct nla_policy rmnet_genl_attr_policy[RMNET_CORE_GENL_ATTR_MAX +
						1] = {
	[RMNET_CORE_GENL_ATTR_INT] = { .type = NLA_S32 },
	[RMNET_CORE_GENL_ATTR_PID_BPS] = { .type = NLA_EXACT_LEN, .len =
				sizeof(struct rmnet_core_pid_bps_resp) },
	[RMNET_CORE_GENL_ATTR_PID_BOOST] = { .type = NLA_EXACT_LEN, .len =
				sizeof(struct rmnet_core_pid_boost_req) },
	[RMNET_CORE_GENL_ATTR_STR] = { .type = NLA_NUL_STRING, .len =
				RMNET_CORE_GENL_MAX_STR_LEN },
};

#define RMNET_CORE_GENL_OP(_cmd, _func)		\
	{					\
		.cmd = _cmd,			\
		.doit = _func,			\
		.dumpit = NULL,			\
		.flags = 0,			\
	}

static const struct genl_ops rmnet_core_genl_ops[] = {
	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BPS_REQ,
			   rmnet_core_genl_pid_bps_req_hdlr),
	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BOOST_REQ,
			   rmnet_core_genl_pid_boost_req_hdlr),
};

struct genl_family rmnet_core_genl_family = {
	.hdrsize = 0,
	.name = RMNET_CORE_GENL_FAMILY_NAME,
	.version = RMNET_CORE_GENL_VERSION,
	.maxattr = RMNET_CORE_GENL_ATTR_MAX,
	.policy = rmnet_genl_attr_policy,
	.ops = rmnet_core_genl_ops,
	.n_ops = ARRAY_SIZE(rmnet_core_genl_ops),
};
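
/* Per-PID tx statistics live in a hash table keyed by pid, with
 * 2^RMNET_PID_STATS_HT_SIZE (256) buckets. All accesses take
 * rmnet_pid_ht_splock, since updates arrive from the data path.
 */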
#define RMNET_PID_STATS_HT_SIZE (8)
#define RMNET_PID_STATS_HT rmnet_pid_ht
DEFINE_HASHTABLE(rmnet_pid_ht, RMNET_PID_STATS_HT_SIZE);

/* Spinlock definition for pid hash table */
static DEFINE_SPINLOCK(rmnet_pid_ht_splock);

#define RMNET_GENL_SEC_TO_MSEC(x)   ((x) * 1000)
#define RMNET_GENL_SEC_TO_NSEC(x)   ((x) * 1000000000)
#define RMNET_GENL_BYTES_TO_BITS(x) ((x) * 8)
#define RMNET_GENL_NSEC_TO_SEC(x) ({\
	u64 __quotient = (x); \
	do_div(__quotient, 1000000000); \
	__quotient; \
})

int rmnet_core_userspace_connected;

#define RMNET_QUERY_PERIOD_SEC (1) /* Period of pid/bps queries */

struct rmnet_pid_node_s {
	struct hlist_node list;
	time_t timestamp_last_query;
	u64 tx_bytes;
	u64 tx_bytes_last_query;
	u64 tx_bps;
	u64 sched_boost_period_ms;
	int sched_boost_remaining_ms;
	int sched_boost_enable;
	pid_t pid;
};
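
/* rmnet_update_pid_and_check_boost() - called on the tx path to credit
 * @len bytes to @pid, creating a hash table node for the pid on first
 * sight. If userspace has armed a one-shot scheduler boost for this pid,
 * it is reported back through @boost_enable/@boost_period and disarmed.
 */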
void rmnet_update_pid_and_check_boost(pid_t pid, unsigned int len,
				      int *boost_enable, u64 *boost_period)
{
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;
	u8 is_match_found = 0;

	*boost_enable = 0;
	*boost_period = 0;

	/* Using do while to spin lock and unlock only once */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
					    list, pid) {
			if (pid != node_p->pid)
				continue;

			/* PID match found */
			is_match_found = 1;
			node_p->tx_bytes += len;

			if (node_p->sched_boost_enable) {
				rm_err("boost triggered for pid %d", pid);
				/* Just triggered boost, don't re-trigger */
				node_p->sched_boost_enable = 0;
				*boost_enable = 1;
				*boost_period = node_p->sched_boost_period_ms;
				node_p->sched_boost_remaining_ms =
							(int)*boost_period;
			}

			break;
		}

		if (is_match_found)
			break;

		/* No PID match, add a new node for this pid */
		node_p = kzalloc(sizeof(*node_p), GFP_ATOMIC);
		if (!node_p)
			break;

		node_p->pid = pid;
		node_p->tx_bytes = len;
		node_p->sched_boost_enable = 0;
		node_p->sched_boost_period_ms = 0;
		node_p->sched_boost_remaining_ms = 0;
		hash_add_rcu(RMNET_PID_STATS_HT, &node_p->list, pid);
		break;
	} while (0);
	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
}
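
/* rmnet_boost_for_pid() - arm (or disarm) the one-shot scheduler boost
 * for @pid. The boost fires on the next tx from that pid via
 * rmnet_update_pid_and_check_boost(). Unknown pids are silently ignored.
 */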
void rmnet_boost_for_pid(pid_t pid, int boost_enable,
			 u64 boost_period)
{
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;

	/* Using do while to spin lock and unlock only once */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
					    list, pid) {
			if (pid != node_p->pid)
				continue;

			/* PID match found */
			rm_err("CORE_BOOST: enable boost for pid %d for %llu ms",
			       pid, boost_period);
			node_p->sched_boost_enable = boost_enable;
			node_p->sched_boost_period_ms = boost_period;
			break;
		}

		break;
	} while (0);
	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
}
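
/* rmnet_create_pid_bps_resp() - walk the pid hash table and fill
 * @pid_bps_resp_ptr with each pid's tx bitrate since the previous query.
 * Pids with no new tx bytes are dropped from the table; at most
 * RMNET_CORE_GENL_MAX_PIDS entries are reported.
 */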
static void rmnet_create_pid_bps_resp(struct rmnet_core_pid_bps_resp
				      *pid_bps_resp_ptr)
{
	struct timespec64 time;
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;
	u64 tx_bytes_cur, byte_diff, time_diff_ns, time_diff_s, tmp_bits;
	int i;
	u16 bkt;

	ktime_get_real_ts64(&time);
	pid_bps_resp_ptr->timestamp = RMNET_GENL_SEC_TO_NSEC(time.tv_sec) +
				      time.tv_nsec;

	/* Using do while to spin lock and unlock only once */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		i = 0;
		hash_for_each_safe(RMNET_PID_STATS_HT, bkt, tmp,
				   node_p, list) {
			tx_bytes_cur = node_p->tx_bytes;
			if (tx_bytes_cur <= node_p->tx_bytes_last_query) {
				/* Don't send inactive pids to userspace */
				hash_del(&node_p->list);
				kfree(node_p);
				continue;
			}

			/* Compute bits per second. Note that do_div stores
			 * the quotient back into its first argument and
			 * returns the remainder, so the scratch copy
			 * tmp_bits is used.
			 */
			byte_diff = (node_p->tx_bytes -
				     node_p->tx_bytes_last_query);
			time_diff_ns = (pid_bps_resp_ptr->timestamp -
					node_p->timestamp_last_query);
			time_diff_s = RMNET_GENL_NSEC_TO_SEC(time_diff_ns);
			/* Guard against dividing by zero when less than a
			 * second has elapsed since the last query.
			 */
			if (!time_diff_s)
				time_diff_s = 1;

			tmp_bits = RMNET_GENL_BYTES_TO_BITS(byte_diff);
			do_div(tmp_bits, time_diff_s);
			node_p->tx_bps = tmp_bits;

			if (node_p->sched_boost_remaining_ms >=
			    RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC)) {
				node_p->sched_boost_remaining_ms -=
				RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC);

				rm_err("CORE_BOOST: boost active for pid %d\n"
				       "sched boost remaining = %d ms",
				       node_p->pid,
				       node_p->sched_boost_remaining_ms);
			} else {
				node_p->sched_boost_remaining_ms = 0;
			}

			pid_bps_resp_ptr->list[i].pid = node_p->pid;
			pid_bps_resp_ptr->list[i].tx_bps = node_p->tx_bps;
			pid_bps_resp_ptr->list[i].boost_remaining_ms =
				node_p->sched_boost_remaining_ms;

			node_p->timestamp_last_query =
				pid_bps_resp_ptr->timestamp;
			node_p->tx_bytes_last_query = tx_bytes_cur;
			i++;

			/* Support copying up to 32 active pids */
			if (i >= RMNET_CORE_GENL_MAX_PIDS)
				break;
		}

		break;
	} while (0);
	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);

	pid_bps_resp_ptr->list_len = i;
}
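
/* rmnet_core_genl_send_resp() - pack @pid_bps_resp into a
 * RMNET_CORE_GENL_ATTR_PID_BPS attribute and unicast it back to the
 * requesting userspace port. On any failure the userspace connection is
 * marked as down.
 */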
int rmnet_core_genl_send_resp(struct genl_info *info,
			      struct rmnet_core_pid_bps_resp *pid_bps_resp)
{
	struct sk_buff *skb;
	void *msg_head;
	int rc;

	if (!info || !pid_bps_resp) {
		rm_err("%s", "SHS_GNL: Invalid params\n");
		goto out;
	}

	skb = genlmsg_new(sizeof(struct rmnet_core_pid_bps_resp), GFP_KERNEL);
	if (!skb)
		goto out;

	msg_head = genlmsg_put(skb, 0, info->snd_seq + 1,
			       &rmnet_core_genl_family,
			       0, RMNET_CORE_GENL_CMD_PID_BPS_REQ);
	if (!msg_head)
		goto out_free;

	rc = nla_put(skb, RMNET_CORE_GENL_ATTR_PID_BPS,
		     sizeof(struct rmnet_core_pid_bps_resp),
		     pid_bps_resp);
	if (rc != 0)
		goto out_free;

	genlmsg_end(skb, msg_head);

	/* genlmsg_unicast() consumes the skb on success and on failure */
	rc = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
	if (rc != 0)
		goto out;

	rm_err("%s", "SHS_GNL: Successfully sent pid/bytes info\n");
	return RMNET_GENL_SUCCESS;

out_free:
	nlmsg_free(skb);
out:
	rm_err("%s", "SHS_GNL: FAILED to send pid/bytes info\n");
	rmnet_core_userspace_connected = 0;
	return RMNET_GENL_FAILURE;
}
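
/* rmnet_core_genl_pid_bps_req_hdlr() - handler for the periodic
 * RMNET_CORE_GENL_CMD_PID_BPS_REQ query from userspace. Marks userspace
 * as connected and replies with the current pid/bitrate list.
 */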
int rmnet_core_genl_pid_bps_req_hdlr(struct sk_buff *skb_2,
				     struct genl_info *info)
{
	struct nlattr *na;
	struct rmnet_core_pid_bps_req pid_bps_req;
	struct rmnet_core_pid_bps_resp pid_bps_resp;
	int is_req_valid = 0;

	rm_err("CORE_GNL: %s connected = %d", __func__,
	       rmnet_core_userspace_connected);

	memset(&pid_bps_resp, 0x0, sizeof(pid_bps_resp));

	if (!info) {
		rm_err("%s", "CORE_GNL: error - info is null");
	} else {
		na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BPS];
		if (na) {
			if (nla_memcpy(&pid_bps_req, na,
				       sizeof(pid_bps_req)) > 0) {
				is_req_valid = 1;
			} else {
				rm_err("CORE_GNL: nla_memcpy failed %d\n",
				       RMNET_CORE_GENL_ATTR_PID_BPS);
			}
		} else {
			rm_err("CORE_GNL: no info->attrs %d\n",
			       RMNET_CORE_GENL_ATTR_PID_BPS);
		}
	}

	if (!rmnet_core_userspace_connected)
		rmnet_core_userspace_connected = 1;

	/* Copy the pid/byte list to the payload */
	if (is_req_valid) {
		rmnet_create_pid_bps_resp(&pid_bps_resp);
		pid_bps_resp.valid = 1;
	}

	rmnet_core_genl_send_resp(info, &pid_bps_resp);

	return RMNET_GENL_SUCCESS;
}
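
/* rmnet_core_genl_pid_boost_req_hdlr() - handler for
 * RMNET_CORE_GENL_CMD_PID_BOOST_REQ. Arms the scheduler boost for every
 * pid flagged in the request, clamped to RMNET_CORE_GENL_MAX_PIDS entries.
 */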
int rmnet_core_genl_pid_boost_req_hdlr(struct sk_buff *skb_2,
				       struct genl_info *info)
{
	struct nlattr *na;
	struct rmnet_core_pid_boost_req pid_boost_req;
	u16 boost_pid_cnt = RMNET_CORE_GENL_MAX_PIDS;
	u16 i = 0;

	rm_err("CORE_GNL: %s", __func__);

	if (!info) {
		rm_err("%s", "CORE_GNL: error - info is null");
		return RMNET_GENL_FAILURE;
	}

	na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BOOST];
	if (!na) {
		rm_err("CORE_GNL: no info->attrs %d\n",
		       RMNET_CORE_GENL_ATTR_PID_BOOST);
		return RMNET_GENL_FAILURE;
	}

	if (nla_memcpy(&pid_boost_req, na, sizeof(pid_boost_req)) <= 0) {
		rm_err("CORE_GNL: nla_memcpy failed %d\n",
		       RMNET_CORE_GENL_ATTR_PID_BOOST);
		return RMNET_GENL_FAILURE;
	}

	if (pid_boost_req.list_len < RMNET_CORE_GENL_MAX_PIDS)
		boost_pid_cnt = pid_boost_req.list_len;

	if (!pid_boost_req.valid)
		boost_pid_cnt = 0;

	for (i = 0; i < boost_pid_cnt; i++) {
		if (pid_boost_req.list[i].boost_enabled) {
			rmnet_boost_for_pid(pid_boost_req.list[i].pid, 1,
					    pid_boost_req.list[i].boost_period);
		}
	}

	return RMNET_GENL_SUCCESS;
}

/* Register the rmnet core driver generic netlink family */
int rmnet_core_genl_init(void)
{
	int ret;

	ret = genl_register_family(&rmnet_core_genl_family);
	if (ret != 0) {
		rm_err("CORE_GNL: register family failed: %i", ret);
		return RMNET_GENL_FAILURE;
	}

	rm_err("CORE_GNL: successfully registered generic netlink family: %s",
	       RMNET_CORE_GENL_FAMILY_NAME);

	return RMNET_GENL_SUCCESS;
}

/* Unregister the generic netlink family */
int rmnet_core_genl_deinit(void)
{
	int ret;

	ret = genl_unregister_family(&rmnet_core_genl_family);
	if (ret != 0)
		rm_err("CORE_GNL: unregister family failed: %i\n", ret);

	return RMNET_GENL_SUCCESS;
}