rmnet_genl.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 *
 * RMNET Data Generic Netlink
 */

#include "rmnet_genl.h"
#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/ktime.h>

#define RMNET_CORE_GENL_MAX_STR_LEN	255

/* Static Functions and Definitions */
static const struct nla_policy rmnet_genl_attr_policy[RMNET_CORE_GENL_ATTR_MAX + 1] = {
	[RMNET_CORE_GENL_ATTR_INT] = { .type = NLA_S32 },
	[RMNET_CORE_GENL_ATTR_PID_BPS] =
		NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_core_pid_bps_resp)),
	[RMNET_CORE_GENL_ATTR_PID_BOOST] =
		NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_core_pid_boost_req)),
	[RMNET_CORE_GENL_ATTR_TETHER_INFO] =
		NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_core_tether_info_req)),
	[RMNET_CORE_GENL_ATTR_STR] = { .type = NLA_NUL_STRING,
				       .len = RMNET_CORE_GENL_MAX_STR_LEN },
};
#define RMNET_CORE_GENL_OP(_cmd, _func) \
	{ \
		.cmd = _cmd, \
		.doit = _func, \
		.dumpit = NULL, \
		.flags = 0, \
	}

static const struct genl_ops rmnet_core_genl_ops[] = {
	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BPS_REQ,
			   rmnet_core_genl_pid_bps_req_hdlr),
	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BOOST_REQ,
			   rmnet_core_genl_pid_boost_req_hdlr),
	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_TETHER_INFO_REQ,
			   rmnet_core_genl_tether_info_req_hdlr),
};

struct genl_family rmnet_core_genl_family = {
	.hdrsize = 0,
	.name = RMNET_CORE_GENL_FAMILY_NAME,
	.version = RMNET_CORE_GENL_VERSION,
	.maxattr = RMNET_CORE_GENL_ATTR_MAX,
	.policy = rmnet_genl_attr_policy,
	.ops = rmnet_core_genl_ops,
	.n_ops = ARRAY_SIZE(rmnet_core_genl_ops),
};
#define RMNET_PID_STATS_HT_SIZE	(8)
#define RMNET_PID_STATS_HT	rmnet_pid_ht
DEFINE_HASHTABLE(rmnet_pid_ht, RMNET_PID_STATS_HT_SIZE);

/* Spinlock protecting the pid hash table */
static DEFINE_SPINLOCK(rmnet_pid_ht_splock);

#define RMNET_GENL_SEC_TO_MSEC(x)	((x) * 1000)
#define RMNET_GENL_SEC_TO_NSEC(x)	((x) * 1000000000)
#define RMNET_GENL_BYTES_TO_BITS(x)	((x) * 8)

/* do_div() divides its first argument in place, so evaluate into a local */
#define RMNET_GENL_NSEC_TO_SEC(x) ({ \
	u64 __quotient = (x); \
	do_div(__quotient, 1000000000); \
	__quotient; \
})

int rmnet_core_userspace_connected;

#define RMNET_QUERY_PERIOD_SEC	(1)	/* Period of pid/bps queries */

struct rmnet_pid_node_s {
	struct hlist_node list;
	ktime_t timestamp_last_query;
	u64 tx_bytes;
	u64 tx_bytes_last_query;
	u64 tx_bps;
	u64 sched_boost_period_ms;
	int sched_boost_remaining_ms;
	int sched_boost_enable;
	pid_t pid;
};

typedef void (*rmnet_perf_tether_cmd_hook_t)(u8 message, u64 val);
rmnet_perf_tether_cmd_hook_t rmnet_perf_tether_cmd_hook __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_tether_cmd_hook);
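
/**
 * rmnet_update_pid_and_check_boost() - Account TX bytes to a pid and check
 *	whether a scheduler boost should fire for it.
 * @pid: pid of the transmitting task
 * @len: number of bytes being transmitted
 * @boost_enable: set to 1 if a boost was just armed for @pid, else 0
 * @boost_period: boost duration in ms, valid when *@boost_enable is 1
 *
 * Looks @pid up in rmnet_pid_ht and adds @len to its byte count; a pid seen
 * for the first time gets a new tracking node (GFP_ATOMIC, since this runs
 * on the TX path under a spinlock). A pending boost request is reported to
 * the caller once and then disarmed so it is not re-triggered per packet.
 */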
void rmnet_update_pid_and_check_boost(pid_t pid, unsigned int len,
				      int *boost_enable, u64 *boost_period)
{
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;
	u8 is_match_found = 0;

	*boost_enable = 0;
	*boost_period = 0;

	/* do-while(0) so every exit path falls through to the single unlock */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
					    list, pid) {
			if (pid != node_p->pid)
				continue;

			/* PID match found */
			is_match_found = 1;
			node_p->tx_bytes += len;

			if (node_p->sched_boost_enable) {
				rm_err("boost triggered for pid %d", pid);
				/* Just triggered boost, don't re-trigger */
				node_p->sched_boost_enable = 0;
				*boost_enable = 1;
				*boost_period = node_p->sched_boost_period_ms;
				node_p->sched_boost_remaining_ms =
					(int)*boost_period;
			}

			break;
		}

		if (is_match_found)
			break;

		/* No PID match - start tracking this pid */
		node_p = kzalloc(sizeof(*node_p), GFP_ATOMIC);
		if (!node_p)
			break;

		node_p->pid = pid;
		node_p->tx_bytes = len;
		node_p->sched_boost_enable = 0;
		node_p->sched_boost_period_ms = 0;
		node_p->sched_boost_remaining_ms = 0;
		hash_add_rcu(RMNET_PID_STATS_HT, &node_p->list, pid);
		break;
	} while (0);
	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
}
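
/**
 * rmnet_boost_for_pid() - Arm or disarm a scheduler boost for a tracked pid.
 * @pid: pid to boost
 * @boost_enable: non-zero to arm the boost
 * @boost_period: boost duration in ms
 *
 * No-op if @pid has no node in rmnet_pid_ht yet; an armed boost actually
 * fires the next time rmnet_update_pid_and_check_boost() sees traffic
 * from @pid.
 */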
void rmnet_boost_for_pid(pid_t pid, int boost_enable, u64 boost_period)
{
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;

	/* do-while(0) so every exit path falls through to the single unlock */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
					    list, pid) {
			if (pid != node_p->pid)
				continue;

			/* PID match found */
			rm_err("CORE_BOOST: enable boost for pid %d for %llu ms",
			       pid, boost_period);
			node_p->sched_boost_enable = boost_enable;
			node_p->sched_boost_period_ms = boost_period;
			break;
		}

		break;
	} while (0);
	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
}
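
/**
 * rmnet_create_pid_bps_resp() - Fill a pid/bps response for userspace.
 * @pid_bps_resp_ptr: response structure to populate
 *
 * Walks rmnet_pid_ht, computes per-pid TX bits per second since the last
 * query, decrements any remaining boost time by one query period, and
 * copies up to RMNET_CORE_GENL_MAX_PIDS entries into the response. Nodes
 * with no new traffic since the last query are dropped from the table.
 */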
static void rmnet_create_pid_bps_resp(struct rmnet_core_pid_bps_resp
				      *pid_bps_resp_ptr)
{
	struct timespec64 time;
	struct hlist_node *tmp;
	struct rmnet_pid_node_s *node_p;
	unsigned long ht_flags;
	u64 tx_bytes_cur, byte_diff, time_diff_ns, tmp_bits;
	int i;
	u16 bkt;

	ktime_get_real_ts64(&time);
	pid_bps_resp_ptr->timestamp = RMNET_GENL_SEC_TO_NSEC(time.tv_sec) +
				      time.tv_nsec;

	/* do-while(0) so every exit path falls through to the single unlock */
	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
	do {
		i = 0;
		hash_for_each_safe(RMNET_PID_STATS_HT, bkt, tmp,
				   node_p, list) {
			tx_bytes_cur = node_p->tx_bytes;
			if (tx_bytes_cur <= node_p->tx_bytes_last_query) {
				/* Don't send inactive pids to userspace */
				hash_del(&node_p->list);
				kfree(node_p);
				continue;
			}

			/* Compute bits per second. do_div() returns the
			 * remainder and assigns the quotient to its first
			 * argument, which it takes by reference, so the
			 * division is done on the tmp_bits copy.
			 */
			byte_diff = (node_p->tx_bytes -
				     node_p->tx_bytes_last_query);
			time_diff_ns = (pid_bps_resp_ptr->timestamp -
					node_p->timestamp_last_query);
			tmp_bits = RMNET_GENL_BYTES_TO_BITS(byte_diff);
			do_div(tmp_bits, RMNET_GENL_NSEC_TO_SEC(time_diff_ns));
			node_p->tx_bps = tmp_bits;

			if (node_p->sched_boost_remaining_ms >=
			    RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC)) {
				node_p->sched_boost_remaining_ms -=
					RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC);
				rm_err("CORE_BOOST: enabling boost for pid %d\n"
				       "sched boost remaining = %d ms",
				       node_p->pid,
				       node_p->sched_boost_remaining_ms);
			} else {
				node_p->sched_boost_remaining_ms = 0;
			}

			pid_bps_resp_ptr->list[i].pid = node_p->pid;
			pid_bps_resp_ptr->list[i].tx_bps = node_p->tx_bps;
			pid_bps_resp_ptr->list[i].boost_remaining_ms =
				node_p->sched_boost_remaining_ms;

			node_p->timestamp_last_query =
				pid_bps_resp_ptr->timestamp;
			node_p->tx_bytes_last_query = tx_bytes_cur;
			i++;

			/* Copy at most RMNET_CORE_GENL_MAX_PIDS active pids */
			if (i >= RMNET_CORE_GENL_MAX_PIDS)
				break;
		}

		break;
	} while (0);
	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);

	pid_bps_resp_ptr->list_len = i;
}
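
/**
 * rmnet_core_genl_send_resp() - Unicast a pid/bps response to userspace.
 * @info: genl info of the request being answered
 * @pid_bps_resp: payload to send
 *
 * On any failure the userspace connection is marked as down, so the next
 * RMNET_CORE_GENL_CMD_PID_BPS_REQ re-establishes it.
 */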
int rmnet_core_genl_send_resp(struct genl_info *info,
			      struct rmnet_core_pid_bps_resp *pid_bps_resp)
{
	struct sk_buff *skb;
	void *msg_head;
	int rc;

	if (!info || !pid_bps_resp) {
		rm_err("%s", "SHS_GNL: Invalid params\n");
		goto out;
	}

	/* Reserve room for the attribute header as well as the payload */
	skb = genlmsg_new(nla_total_size(sizeof(struct rmnet_core_pid_bps_resp)),
			  GFP_KERNEL);
	if (!skb)
		goto out;

	msg_head = genlmsg_put(skb, 0, info->snd_seq + 1,
			       &rmnet_core_genl_family,
			       0, RMNET_CORE_GENL_CMD_PID_BPS_REQ);
	if (!msg_head)
		goto out_free;

	rc = nla_put(skb, RMNET_CORE_GENL_ATTR_PID_BPS,
		     sizeof(struct rmnet_core_pid_bps_resp),
		     pid_bps_resp);
	if (rc != 0)
		goto out_free;

	genlmsg_end(skb, msg_head);

	rc = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
	if (rc != 0)
		goto out;

	rm_err("%s", "SHS_GNL: Successfully sent pid/bytes info\n");
	return RMNET_GENL_SUCCESS;

out_free:
	/* genlmsg_unicast() consumes the skb even on failure, so the skb
	 * is freed only on errors that happen before the send.
	 */
	nlmsg_free(skb);
out:
	rm_err("%s", "SHS_GNL: FAILED to send pid/bytes info\n");
	rmnet_core_userspace_connected = 0;
	return RMNET_GENL_FAILURE;
}
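
/* Handler for RMNET_CORE_GENL_CMD_PID_BPS_REQ: marks userspace as
 * connected and answers with the current pid/bps list.
 */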
int rmnet_core_genl_pid_bps_req_hdlr(struct sk_buff *skb_2,
				     struct genl_info *info)
{
	struct nlattr *na;
	struct rmnet_core_pid_bps_req pid_bps_req;
	struct rmnet_core_pid_bps_resp pid_bps_resp;
	int is_req_valid = 0;

	rm_err("CORE_GNL: %s connected = %d", __func__,
	       rmnet_core_userspace_connected);

	if (!info) {
		rm_err("%s", "CORE_GNL: error - info is null");
	} else {
		na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BPS];
		if (na) {
			if (nla_memcpy(&pid_bps_req, na,
				       sizeof(pid_bps_req)) > 0) {
				is_req_valid = 1;
			} else {
				rm_err("CORE_GNL: nla_memcpy failed %d\n",
				       RMNET_CORE_GENL_ATTR_PID_BPS);
			}
		} else {
			rm_err("CORE_GNL: no info->attrs %d\n",
			       RMNET_CORE_GENL_ATTR_PID_BPS);
		}
	}

	if (!rmnet_core_userspace_connected)
		rmnet_core_userspace_connected = 1;

	/* Copy the pid/byte list into the response payload */
	memset(&pid_bps_resp, 0x0, sizeof(pid_bps_resp));
	if (is_req_valid)
		rmnet_create_pid_bps_resp(&pid_bps_resp);
	pid_bps_resp.valid = 1;

	rmnet_core_genl_send_resp(info, &pid_bps_resp);

	return RMNET_GENL_SUCCESS;
}
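
/* Handler for RMNET_CORE_GENL_CMD_PID_BOOST_REQ: arms a scheduler boost
 * for each pid flagged as boost_enabled in the request.
 */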
int rmnet_core_genl_pid_boost_req_hdlr(struct sk_buff *skb_2,
				       struct genl_info *info)
{
	struct nlattr *na;
	struct rmnet_core_pid_boost_req pid_boost_req;
	u16 boost_pid_cnt = RMNET_CORE_GENL_MAX_PIDS;
	u16 i = 0;

	rm_err("CORE_GNL: %s", __func__);

	if (!info) {
		rm_err("%s", "CORE_GNL: error - info is null");
		return RMNET_GENL_FAILURE;
	}

	na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BOOST];
	if (na) {
		if (nla_memcpy(&pid_boost_req, na,
			       sizeof(pid_boost_req)) <= 0) {
			rm_err("CORE_GNL: nla_memcpy failed %d\n",
			       RMNET_CORE_GENL_ATTR_PID_BOOST);
			return RMNET_GENL_FAILURE;
		}
	} else {
		rm_err("CORE_GNL: no info->attrs %d\n",
		       RMNET_CORE_GENL_ATTR_PID_BOOST);
		return RMNET_GENL_FAILURE;
	}

	if (pid_boost_req.list_len < RMNET_CORE_GENL_MAX_PIDS)
		boost_pid_cnt = pid_boost_req.list_len;

	if (!pid_boost_req.valid)
		boost_pid_cnt = 0;

	for (i = 0; i < boost_pid_cnt; i++) {
		if (pid_boost_req.list[i].boost_enabled)
			rmnet_boost_for_pid(pid_boost_req.list[i].pid, 1,
					    pid_boost_req.list[i].boost_period);
	}

	return RMNET_GENL_SUCCESS;
}
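
/* Handler for RMNET_CORE_GENL_CMD_TETHER_INFO_REQ: forwards the tether
 * filter state through rmnet_perf_tether_cmd_hook, if one is registered.
 */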
int rmnet_core_genl_tether_info_req_hdlr(struct sk_buff *skb_2,
					 struct genl_info *info)
{
	struct nlattr *na;
	struct rmnet_core_tether_info_req tether_info_req;
	rmnet_perf_tether_cmd_hook_t rmnet_perf_tether_cmd;

	rm_err("CORE_GNL: %s", __func__);

	if (!info) {
		rm_err("%s", "CORE_GNL: error - info is null");
		return RMNET_GENL_FAILURE;
	}

	na = info->attrs[RMNET_CORE_GENL_ATTR_TETHER_INFO];
	if (na) {
		if (nla_memcpy(&tether_info_req, na,
			       sizeof(tether_info_req)) <= 0) {
			rm_err("CORE_GNL: nla_memcpy failed %d\n",
			       RMNET_CORE_GENL_ATTR_TETHER_INFO);
			return RMNET_GENL_FAILURE;
		}
	} else {
		rm_err("CORE_GNL: no info->attrs %d\n",
		       RMNET_CORE_GENL_ATTR_TETHER_INFO);
		return RMNET_GENL_FAILURE;
	}

	if (!tether_info_req.valid) {
		rm_err("%s", "CORE_GNL: tether info req is invalid");
		return RMNET_GENL_FAILURE;
	}

	/* The hook pointer is annotated __rcu, so dereference and call it
	 * under the RCU read lock.
	 */
	rcu_read_lock();
	rmnet_perf_tether_cmd = rcu_dereference(rmnet_perf_tether_cmd_hook);
	if (rmnet_perf_tether_cmd)
		rmnet_perf_tether_cmd(1, tether_info_req.tether_filters_en);
	rcu_read_unlock();

	rm_err("CORE_GNL: tether filters %s",
	       tether_info_req.tether_filters_en ? "enabled" : "disabled");

	return RMNET_GENL_SUCCESS;
}
/* Register the rmnet core generic netlink family */
int rmnet_core_genl_init(void)
{
	int ret;

	ret = genl_register_family(&rmnet_core_genl_family);
	if (ret != 0) {
		rm_err("CORE_GNL: register family failed: %i", ret);
		return RMNET_GENL_FAILURE;
	}

	rm_err("CORE_GNL: successfully registered generic netlink family: %s",
	       RMNET_CORE_GENL_FAMILY_NAME);

	return RMNET_GENL_SUCCESS;
}

/* Unregister the generic netlink family */
int rmnet_core_genl_deinit(void)
{
	int ret;

	ret = genl_unregister_family(&rmnet_core_genl_family);
	if (ret != 0)
		rm_err("CORE_GNL: unregister family failed: %i\n", ret);

	return RMNET_GENL_SUCCESS;
}