rmnet_genl.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
 *
 * RMNET Data Generic Netlink
 */

#include "rmnet_genl.h"
#include <net/sock.h>
#include <linux/skbuff.h>

#define RMNET_CORE_GENL_MAX_STR_LEN 255

/* Static Functions and Definitions */
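
/* Attribute validation policy for the family, indexed by attribute type.
 * The fixed-size binary attributes must match the struct layouts shared
 * with userspace exactly; the string attribute is bounded and must be
 * NUL-terminated.
 */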
static const struct nla_policy rmnet_genl_attr_policy[RMNET_CORE_GENL_ATTR_MAX +
						      1] = {
	[RMNET_CORE_GENL_ATTR_INT] = { .type = NLA_S32 },
	[RMNET_CORE_GENL_ATTR_PID_BPS] = { .type = NLA_EXACT_LEN,
		.len = sizeof(struct rmnet_core_pid_bps_resp) },
	[RMNET_CORE_GENL_ATTR_PID_BOOST] = { .type = NLA_EXACT_LEN,
		.len = sizeof(struct rmnet_core_pid_boost_req) },
	[RMNET_CORE_GENL_ATTR_STR] = { .type = NLA_NUL_STRING,
		.len = RMNET_CORE_GENL_MAX_STR_LEN },
};
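
/* Helper to declare a doit-only generic netlink operation; each command ID
 * below maps directly to one handler function, with no dump support.
 */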
#define RMNET_CORE_GENL_OP(_cmd, _func)	\
	{				\
		.cmd = _cmd,		\
		.doit = _func,		\
		.dumpit = NULL,		\
		.flags = 0,		\
	}

static const struct genl_ops rmnet_core_genl_ops[] = {
	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BPS_REQ,
			   rmnet_core_genl_pid_bps_req_hdlr),
	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BOOST_REQ,
			   rmnet_core_genl_pid_boost_req_hdlr),
};
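
/* Family definition. Userspace resolves the family ID at runtime by name
 * (RMNET_CORE_GENL_FAMILY_NAME) via a CTRL_CMD_GETFAMILY lookup, then issues
 * the commands above; a libnl-based client would do roughly
 * genl_ctrl_resolve() followed by genlmsg_put() with the command ID. (That
 * userspace flow is an illustration, not something this driver defines.)
 */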
struct genl_family rmnet_core_genl_family = {
	.hdrsize = 0,
	.name = RMNET_CORE_GENL_FAMILY_NAME,
	.version = RMNET_CORE_GENL_VERSION,
	.maxattr = RMNET_CORE_GENL_ATTR_MAX,
	.policy = rmnet_genl_attr_policy,
	.ops = rmnet_core_genl_ops,
	.n_ops = ARRAY_SIZE(rmnet_core_genl_ops),
};
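
/* Per-PID transmit accounting table, keyed by pid. Note that
 * DEFINE_HASHTABLE() takes the number of bucket bits, so 8 here means
 * 256 buckets. All readers and writers take rmnet_pid_ht_splock with the
 * _irqsave variants, since updates can come from the packet path.
 */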
#define RMNET_PID_STATS_HT_SIZE (8)
#define RMNET_PID_STATS_HT rmnet_pid_ht
DEFINE_HASHTABLE(rmnet_pid_ht, RMNET_PID_STATS_HT_SIZE);
/* Spinlock definition for pid hash table */
static DEFINE_SPINLOCK(rmnet_pid_ht_splock);

#define RMNET_GENL_SEC_TO_MSEC(x)   ((x) * 1000)
#define RMNET_GENL_SEC_TO_NSEC(x)   ((x) * 1000000000)
#define RMNET_GENL_BYTES_TO_BITS(x) ((x) * 8)
#define RMNET_GENL_NSEC_TO_SEC(x) ({\
	u64 __quotient = (x); \
	do_div(__quotient, 1000000000); \
	__quotient; \
})

int rmnet_core_userspace_connected;

#define RMNET_QUERY_PERIOD_SEC (1) /* Period of pid/bps queries */
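
/* One accounting node per observed PID: cumulative TX bytes, the snapshot
 * taken at the last userspace query, the derived rate, and the sched-boost
 * bookkeeping driven by the boost request command.
 */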
struct rmnet_pid_node_s {
	struct hlist_node list;
	u64 timestamp_last_query; /* nanoseconds since epoch */
	u64 tx_bytes;
	u64 tx_bytes_last_query;
	u64 tx_bps;
	u64 sched_boost_period_ms;
	int sched_boost_remaining_ms;
	int sched_boost_enable;
	pid_t pid;
};
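
/* Credit @len bytes to @pid and report whether a one-shot scheduler boost
 * should fire for this flow. Presumably invoked per packet from the rmnet
 * transmit path; the first packet from an unseen PID allocates a node with
 * GFP_ATOMIC since we hold a spinlock with IRQs saved.
 */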
void rmnet_update_pid_and_check_boost(pid_t pid, unsigned int len,
				      int *boost_enable, u64 *boost_period)
{
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;
	u8 is_match_found = 0;

	*boost_enable = 0;
	*boost_period = 0;

	/* Use do-while(0) so the lock is taken and released exactly once */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
					    list, pid) {
			if (pid != node_p->pid)
				continue;

			/* PID match found */
			is_match_found = 1;
			node_p->tx_bytes += len;

			if (node_p->sched_boost_enable) {
				rm_err("boost triggered for pid %d",
				       pid);
				/* Boost just triggered; don't re-trigger */
				node_p->sched_boost_enable = 0;
				*boost_enable = 1;
				*boost_period = node_p->sched_boost_period_ms;
				node_p->sched_boost_remaining_ms =
							(int)*boost_period;
			}

			break;
		}

		if (is_match_found)
			break;

		/* No PID match; add a new node */
		node_p = kzalloc(sizeof(*node_p), GFP_ATOMIC);
		if (!node_p)
			break;

		node_p->pid = pid;
		node_p->tx_bytes = len;
		node_p->sched_boost_enable = 0;
		node_p->sched_boost_period_ms = 0;
		node_p->sched_boost_remaining_ms = 0;
		hash_add_rcu(RMNET_PID_STATS_HT, &node_p->list, pid);
		break;
	} while (0);
	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
}
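
/* Arm (or disarm) a one-shot boost on an already-tracked PID; the boost
 * itself fires from rmnet_update_pid_and_check_boost() on the next packet
 * sent by that PID.
 */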
void rmnet_boost_for_pid(pid_t pid, int boost_enable,
			 u64 boost_period)
{
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;

	/* Use do-while(0) so the lock is taken and released exactly once */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
					    list, pid) {
			if (pid != node_p->pid)
				continue;

			/* PID match found */
			rm_err("CORE_BOOST: enable boost for pid %d for %llu ms",
			       pid, boost_period);
			node_p->sched_boost_enable = boost_enable;
			node_p->sched_boost_period_ms = boost_period;
			break;
		}

		break;
	} while (0);
	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
}
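
/* Build the pid/bps response for userspace. For every PID that sent data
 * since the previous query, the rate is computed as
 *     tx_bps = (bytes_now - bytes_last) * 8 / elapsed_seconds
 * and the remaining boost budget is decremented by one query period.
 * Inactive PIDs have their rate zeroed and are not reported.
 */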
static void rmnet_create_pid_bps_resp(struct rmnet_core_pid_bps_resp
				      *pid_bps_resp_ptr)
{
	struct timespec64 time;
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;
	u64 tx_bytes_cur, byte_diff, time_diff_ns, time_diff_sec, tmp_bits;
	int i;
	u16 bkt;

	ktime_get_real_ts64(&time);
	pid_bps_resp_ptr->timestamp = RMNET_GENL_SEC_TO_NSEC(time.tv_sec) +
				      time.tv_nsec;

	/* Use do-while(0) so the lock is taken and released exactly once */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		i = 0;
		hash_for_each_safe(RMNET_PID_STATS_HT, bkt, tmp,
				   node_p, list) {
			tx_bytes_cur = node_p->tx_bytes;
			if (tx_bytes_cur <= node_p->tx_bytes_last_query) {
				/* Don't send inactive pids to userspace */
				/* TODO: can remove from hash table probably */
				node_p->tx_bps = 0;
				node_p->timestamp_last_query =
					pid_bps_resp_ptr->timestamp;
				node_p->sched_boost_remaining_ms = 0;
				continue;
			}

			/* Compute bits per second. do_div() returns the
			 * remainder and assigns the quotient to its first
			 * argument (taken by reference), so tmp_bits is used
			 * as scratch. Clamp the elapsed time to at least one
			 * second to avoid dividing by zero.
			 */
			byte_diff = (node_p->tx_bytes -
				     node_p->tx_bytes_last_query);
			time_diff_ns = (pid_bps_resp_ptr->timestamp -
					node_p->timestamp_last_query);
			time_diff_sec = RMNET_GENL_NSEC_TO_SEC(time_diff_ns);
			if (!time_diff_sec)
				time_diff_sec = 1;

			tmp_bits = RMNET_GENL_BYTES_TO_BITS(byte_diff);
			do_div(tmp_bits, time_diff_sec);
			node_p->tx_bps = tmp_bits;

			if (node_p->sched_boost_remaining_ms >=
			    RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC)) {
				node_p->sched_boost_remaining_ms -=
					RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC);
				rm_err("CORE_BOOST: enabling boost for pid %d\n"
				       "sched boost remaining = %d ms",
				       node_p->pid,
				       node_p->sched_boost_remaining_ms);
			} else {
				node_p->sched_boost_remaining_ms = 0;
			}

			pid_bps_resp_ptr->list[i].pid = node_p->pid;
			pid_bps_resp_ptr->list[i].tx_bps = node_p->tx_bps;
			pid_bps_resp_ptr->list[i].boost_remaining_ms =
				node_p->sched_boost_remaining_ms;

			node_p->timestamp_last_query =
				pid_bps_resp_ptr->timestamp;
			node_p->tx_bytes_last_query = tx_bytes_cur;
			i++;

			/* Copy at most RMNET_CORE_GENL_MAX_PIDS active pids */
			if (i >= RMNET_CORE_GENL_MAX_PIDS)
				break;
		}

		break;
	} while (0);
	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);

	pid_bps_resp_ptr->list_len = i;
}
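
/* Allocate, build, and unicast a RMNET_CORE_GENL_ATTR_PID_BPS reply to the
 * requesting process. On any failure the userspace-connected flag is
 * cleared so the driver stops assuming a live listener.
 */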
int rmnet_core_genl_send_resp(struct genl_info *info,
			      struct rmnet_core_pid_bps_resp *pid_bps_resp)
{
	struct sk_buff *skb;
	void *msg_head;
	int rc;

	if (!info || !pid_bps_resp) {
		rm_err("%s", "SHS_GNL: Invalid params\n");
		goto out;
	}

	skb = genlmsg_new(sizeof(struct rmnet_core_pid_bps_resp), GFP_KERNEL);
	if (!skb)
		goto out;

	msg_head = genlmsg_put(skb, 0, info->snd_seq + 1,
			       &rmnet_core_genl_family,
			       0, RMNET_CORE_GENL_CMD_PID_BPS_REQ);
	if (!msg_head)
		goto out_free;

	rc = nla_put(skb, RMNET_CORE_GENL_ATTR_PID_BPS,
		     sizeof(struct rmnet_core_pid_bps_resp),
		     pid_bps_resp);
	if (rc != 0)
		goto out_free;

	genlmsg_end(skb, msg_head);

	/* genlmsg_unicast() consumes the skb even on failure */
	rc = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
	if (rc != 0)
		goto out;

	rm_err("%s", "SHS_GNL: Successfully sent pid/bytes info\n");
	return RMNET_GENL_SUCCESS;

out_free:
	nlmsg_free(skb);
out:
	rm_err("%s", "SHS_GNL: FAILED to send pid/bytes info\n");
	rmnet_core_userspace_connected = 0;
	return RMNET_GENL_FAILURE;
}
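
/* Handler for RMNET_CORE_GENL_CMD_PID_BPS_REQ: userspace polls this
 * (nominally every RMNET_QUERY_PERIOD_SEC) to fetch per-PID rates; the
 * first request also marks userspace as connected.
 */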
int rmnet_core_genl_pid_bps_req_hdlr(struct sk_buff *skb_2,
				     struct genl_info *info)
{
	struct nlattr *na;
	struct rmnet_core_pid_bps_req pid_bps_req;
	struct rmnet_core_pid_bps_resp pid_bps_resp;
	int is_req_valid = 0;

	rm_err("CORE_GNL: %s connected = %d", __func__,
	       rmnet_core_userspace_connected);

	/* Zero the response so stale stack data is never sent to userspace */
	memset(&pid_bps_resp, 0x0, sizeof(pid_bps_resp));

	if (!info) {
		rm_err("%s", "CORE_GNL: error - info is null");
	} else {
		na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BPS];
		if (na) {
			if (nla_memcpy(&pid_bps_req, na,
				       sizeof(pid_bps_req)) > 0) {
				is_req_valid = 1;
			} else {
				rm_err("CORE_GNL: nla_memcpy failed %d\n",
				       RMNET_CORE_GENL_ATTR_PID_BPS);
			}
		} else {
			rm_err("CORE_GNL: no info->attrs %d\n",
			       RMNET_CORE_GENL_ATTR_PID_BPS);
		}
	}

	if (!rmnet_core_userspace_connected)
		rmnet_core_userspace_connected = 1;

	/* Copy the pid/byte list into the response payload */
	if (is_req_valid)
		rmnet_create_pid_bps_resp(&pid_bps_resp);

	pid_bps_resp.valid = 1;

	rmnet_core_genl_send_resp(info, &pid_bps_resp);

	return RMNET_GENL_SUCCESS;
}
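
/* Handler for RMNET_CORE_GENL_CMD_PID_BOOST_REQ: validates the request
 * attribute and arms a boost for each PID flagged in the list.
 */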
int rmnet_core_genl_pid_boost_req_hdlr(struct sk_buff *skb_2,
				       struct genl_info *info)
{
	struct nlattr *na;
	struct rmnet_core_pid_boost_req pid_boost_req;
	u16 boost_pid_cnt = RMNET_CORE_GENL_MAX_PIDS;
	u16 i = 0;

	rm_err("CORE_GNL: %s", __func__);

	if (!info) {
		rm_err("%s", "CORE_GNL: error - info is null");
		return RMNET_GENL_FAILURE;
	}

	na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BOOST];
	if (!na) {
		rm_err("CORE_GNL: no info->attrs %d\n",
		       RMNET_CORE_GENL_ATTR_PID_BOOST);
		return RMNET_GENL_FAILURE;
	}

	if (nla_memcpy(&pid_boost_req, na, sizeof(pid_boost_req)) <= 0) {
		rm_err("CORE_GNL: nla_memcpy failed %d\n",
		       RMNET_CORE_GENL_ATTR_PID_BOOST);
		return RMNET_GENL_FAILURE;
	}

	if (pid_boost_req.list_len < RMNET_CORE_GENL_MAX_PIDS)
		boost_pid_cnt = pid_boost_req.list_len;

	if (!pid_boost_req.valid)
		boost_pid_cnt = 0;

	for (i = 0; i < boost_pid_cnt; i++) {
		if (pid_boost_req.list[i].boost_enabled) {
			rmnet_boost_for_pid(pid_boost_req.list[i].pid, 1,
					    pid_boost_req.list[i].boost_period);
		}
	}

	return RMNET_GENL_SUCCESS;
}

/* Register the rmnet core driver generic netlink family */
int rmnet_core_genl_init(void)
{
	int ret;

	ret = genl_register_family(&rmnet_core_genl_family);
	if (ret != 0) {
		rm_err("CORE_GNL: register family failed: %i", ret);
		return RMNET_GENL_FAILURE;
	}

	rm_err("CORE_GNL: successfully registered generic netlink family: %s",
	       RMNET_CORE_GENL_FAMILY_NAME);

	return RMNET_GENL_SUCCESS;
}

/* Unregister the generic netlink family */
int rmnet_core_genl_deinit(void)
{
	int ret;

	ret = genl_unregister_family(&rmnet_core_genl_family);
	if (ret != 0)
		rm_err("CORE_GNL: unregister family failed: %i\n", ret);

	return RMNET_GENL_SUCCESS;
}