rmnet_map_command.c

/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"

/* Minimum frame sizes for the control frames handled below: each is a MAP
 * header plus a command header, followed by the indication payload, if any.
 */
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \
			    sizeof(struct rmnet_map_control_command_header))

#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

#define RMNET_PB_IND_HDR_SIZE (sizeof(struct rmnet_map_pb_ind_hdr) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

/* Handle a MAP flow control command: enable or disable flow on the virtual
 * device behind the command's mux ID and return the ack code to send back.
 */
static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
				    struct rmnet_port *port,
				    int enable)
{
	struct rmnet_map_header *qmap;
	struct rmnet_map_control_command *cmd;
	struct rmnet_endpoint *ep;
	struct net_device *vnd;
	u16 ip_family;
	u16 fc_seq;
	u32 qos_id;
	u8 mux_id;
	int r;

	qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
	mux_id = qmap->mux_id;
	cmd = rmnet_map_get_cmd_start(skb);

	if (mux_id >= RMNET_MAX_LOGICAL_EP) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	vnd = ep->egress_dev;

	ip_family = cmd->flow_control.ip_family;
	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
	qos_id = ntohl(cmd->flow_control.qos_id);

	/* Ignore the IP family and pass the sequence number for both v4 and
	 * v6 sequences. User space does not support creating dedicated flows
	 * for the two protocols.
	 */
	r = rmnet_vnd_do_flow_control(vnd, enable);
	if (r) {
		kfree_skb(skb);
		return RMNET_MAP_COMMAND_UNSUPPORTED;
	} else {
		return RMNET_MAP_COMMAND_ACK;
	}
}

static void rmnet_map_send_ack(struct sk_buff *skb,
			       unsigned char type,
			       struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	struct net_device *dev = skb->dev;

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
		pskb_trim(skb,
			  skb->len - sizeof(struct rmnet_map_dl_csum_trailer));

	skb->protocol = htons(ETH_P_MAP);

	cmd = rmnet_map_get_cmd_start(skb);
	cmd->cmd_type = type & 0x03;

	netif_tx_lock(dev);
	dev->netdev_ops->ndo_start_xmit(skb, dev);
	netif_tx_unlock(dev);
}

void
rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
			   struct rmnet_map_dl_ind_hdr *dlhdr,
			   struct rmnet_map_control_command_header *qcmd)
{
	struct rmnet_map_dl_ind *tmp;

	list_for_each_entry(tmp, &port->dl_list, list)
		tmp->dl_hdr_handler_v2(dlhdr, qcmd);
}

void
rmnet_map_dl_trl_notify_v2(struct rmnet_port *port,
			   struct rmnet_map_dl_ind_trl *dltrl,
			   struct rmnet_map_control_command_header *qcmd)
{
	struct rmnet_map_dl_ind *tmp;

	list_for_each_entry(tmp, &port->dl_list, list)
		tmp->dl_trl_handler_v2(dltrl, qcmd);
}

void rmnet_map_pb_ind_notify(struct rmnet_port *port,
			     struct rmnet_map_pb_ind_hdr *pbhdr)
{
	struct rmnet_map_pb_ind *tmp;

	list_for_each_entry(tmp, &port->pb_list, list) {
		port->stats.pb_marker_seq = pbhdr->le.seq_num;
		tmp->pb_ind_handler(pbhdr);
	}
}

static void rmnet_map_process_pb_ind(struct sk_buff *skb,
				     struct rmnet_port *port,
				     bool rmnet_perf)
{
	struct rmnet_map_pb_ind_hdr *pbhdr;
	u32 data_format;
	bool is_dl_mark_v2;

	if (skb->len < RMNET_PB_IND_HDR_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	if (is_dl_mark_v2) {
		pskb_pull(skb, sizeof(struct rmnet_map_header) +
			  sizeof(struct rmnet_map_control_command_header));
	}

	pbhdr = (struct rmnet_map_pb_ind_hdr *)rmnet_map_data_ptr(skb);
	port->stats.pb_marker_count++;

	if (is_dl_mark_v2)
		rmnet_map_pb_ind_notify(port, pbhdr);

	if (rmnet_perf) {
		unsigned int pull_size;

		pull_size = sizeof(struct rmnet_map_pb_ind_hdr);
		if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
		pskb_pull(skb, pull_size);
	}
}

static void rmnet_map_process_flow_start(struct sk_buff *skb,
					 struct rmnet_port *port,
					 bool rmnet_perf)
{
	struct rmnet_map_dl_ind_hdr *dlhdr;
	struct rmnet_map_control_command_header *qcmd;
	u32 data_format;
	bool is_dl_mark_v2;

	if (skb->len < RMNET_DL_IND_HDR_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	if (is_dl_mark_v2) {
		pskb_pull(skb, sizeof(struct rmnet_map_header));
		qcmd = (struct rmnet_map_control_command_header *)
			rmnet_map_data_ptr(skb);
		port->stats.dl_hdr_last_ep_id = qcmd->source_id;
		port->stats.dl_hdr_last_qmap_vers = qcmd->reserved;
		port->stats.dl_hdr_last_trans_id = qcmd->transaction_id;
		pskb_pull(skb, sizeof(struct rmnet_map_control_command_header));
	}

	dlhdr = (struct rmnet_map_dl_ind_hdr *)rmnet_map_data_ptr(skb);

	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
	port->stats.dl_hdr_count++;

	if (is_dl_mark_v2)
		rmnet_map_dl_hdr_notify_v2(port, dlhdr, qcmd);

	if (rmnet_perf) {
		unsigned int pull_size;

		pull_size = sizeof(struct rmnet_map_dl_ind_hdr);
		if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
		pskb_pull(skb, pull_size);
	}
}

static void rmnet_map_process_flow_end(struct sk_buff *skb,
				       struct rmnet_port *port,
				       bool rmnet_perf)
{
	struct rmnet_map_dl_ind_trl *dltrl;
	struct rmnet_map_control_command_header *qcmd;
	u32 data_format;
	bool is_dl_mark_v2;

	if (skb->len < RMNET_DL_IND_TRL_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	if (is_dl_mark_v2) {
		pskb_pull(skb, sizeof(struct rmnet_map_header));
		qcmd = (struct rmnet_map_control_command_header *)
			rmnet_map_data_ptr(skb);
		pskb_pull(skb, sizeof(struct rmnet_map_control_command_header));
	}

	dltrl = (struct rmnet_map_dl_ind_trl *)rmnet_map_data_ptr(skb);

	port->stats.dl_trl_last_seq = dltrl->seq_le;
	port->stats.dl_trl_count++;

	if (is_dl_mark_v2)
		rmnet_map_dl_trl_notify_v2(port, dltrl, qcmd);

	if (rmnet_perf) {
		unsigned int pull_size;

		pull_size = sizeof(struct rmnet_map_dl_ind_trl);
		if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
		pskb_pull(skb, pull_size);
	}
}

/* Process a MAP command frame and send an ACK/NACK message as appropriate.
 * The command name is decoded here and the matching handler is called.
 */
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	unsigned char command_name;
	unsigned char rc = 0;

	cmd = rmnet_map_get_cmd_start(skb);
	command_name = cmd->command_name;

	switch (command_name) {
	case RMNET_MAP_COMMAND_FLOW_ENABLE:
		rc = rmnet_map_do_flow_control(skb, port, 1);
		break;

	case RMNET_MAP_COMMAND_FLOW_DISABLE:
		rc = rmnet_map_do_flow_control(skb, port, 0);
		break;

	default:
		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
		kfree_skb(skb);
		break;
	}

	if (rc == RMNET_MAP_COMMAND_ACK)
		rmnet_map_send_ack(skb, rc, port);
}

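/* Illustrative sketch (not compiled): roughly what building a minimal
 * FLOW_ENABLE command frame for rmnet_map_command() could look like. Only
 * fields this file itself reads (mux_id, command_name, flow_control.qos_id)
 * are set; the exact struct layouts live in rmnet_map.h and the MAP
 * header's command/data bit is deliberately elided, so treat this as an
 * assumption-laden example, not a wire-format reference. The helper name
 * is hypothetical.
 */
#if 0
static struct sk_buff *rmnet_map_build_flow_enable(u8 mux_id, u32 qos_id)
{
	struct rmnet_map_header *maph;
	struct rmnet_map_control_command *cmd;
	struct sk_buff *skb;

	skb = alloc_skb(RMNET_MAP_CMD_SIZE, GFP_ATOMIC);
	if (!skb)
		return NULL;

	maph = (struct rmnet_map_header *)skb_put(skb, RMNET_MAP_CMD_SIZE);
	memset(maph, 0, RMNET_MAP_CMD_SIZE);
	maph->mux_id = mux_id;	/* logical endpoint being flow-controlled */

	cmd = rmnet_map_get_cmd_start(skb);
	cmd->command_name = RMNET_MAP_COMMAND_FLOW_ENABLE;
	cmd->flow_control.qos_id = htonl(qos_id);

	skb->protocol = htons(ETH_P_MAP);
	return skb;
}
#endif
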
int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port,
			   bool rmnet_perf)
{
	struct rmnet_map_control_command *cmd;
	unsigned char command_name;

	cmd = rmnet_map_get_cmd_start(skb);
	command_name = cmd->command_name;

	/* Silently discard any markers received on the LL channel */
	if (skb->priority == 0xda1a &&
	    (command_name == RMNET_MAP_COMMAND_FLOW_START ||
	     command_name == RMNET_MAP_COMMAND_FLOW_END)) {
		if (!rmnet_perf)
			consume_skb(skb);

		return 0;
	}

	switch (command_name) {
	case RMNET_MAP_COMMAND_FLOW_START:
		rmnet_map_process_flow_start(skb, port, rmnet_perf);
		break;

	case RMNET_MAP_COMMAND_FLOW_END:
		rmnet_map_process_flow_end(skb, port, rmnet_perf);
		break;

	case RMNET_MAP_COMMAND_PB_BYTES:
		rmnet_map_process_pb_ind(skb, port, rmnet_perf);
		break;

	default:
		return 1;
	}

	/* When rmnet_perf is loaded, that module consumes the skb instead */
	if (!rmnet_perf)
		consume_skb(skb);

	return 0;
}
EXPORT_SYMBOL(rmnet_map_flow_command);

void rmnet_map_cmd_exit(struct rmnet_port *port)
{
	struct rmnet_map_dl_ind *tmp, *idx;

	list_for_each_entry_safe(tmp, idx, &port->dl_list, list)
		list_del_rcu(&tmp->list);

	list_for_each_entry_safe(tmp, idx, &port->pb_list, list)
		list_del_rcu(&tmp->list);
}

void rmnet_map_cmd_init(struct rmnet_port *port)
{
	INIT_LIST_HEAD(&port->dl_list);
	INIT_LIST_HEAD(&port->pb_list);
}

int rmnet_map_dl_ind_register(struct rmnet_port *port,
			      struct rmnet_map_dl_ind *dl_ind)
{
	struct rmnet_map_dl_ind *dl_ind_iterator;
	bool empty_ind_list = true;

	if (!port || !dl_ind || !dl_ind->dl_hdr_handler_v2 ||
	    !dl_ind->dl_trl_handler_v2)
		return -EINVAL;

	/* Keep the handler list sorted in ascending priority order */
	list_for_each_entry_rcu(dl_ind_iterator, &port->dl_list, list) {
		empty_ind_list = false;
		if (dl_ind_iterator->priority < dl_ind->priority) {
			if (dl_ind_iterator->list.next) {
				if (dl_ind->priority <
				    list_entry_rcu(dl_ind_iterator->list.next,
						   typeof(*dl_ind_iterator),
						   list)->priority) {
					list_add_rcu(&dl_ind->list,
						     &dl_ind_iterator->list);
					break;
				}
			} else {
				list_add_rcu(&dl_ind->list,
					     &dl_ind_iterator->list);
				break;
			}
		} else {
			list_add_tail_rcu(&dl_ind->list,
					  &dl_ind_iterator->list);
			break;
		}
	}

	if (empty_ind_list)
		list_add_rcu(&dl_ind->list, &port->dl_list);

	return 0;
}
EXPORT_SYMBOL(rmnet_map_dl_ind_register);

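/* Illustrative sketch (not compiled): how a client such as rmnet_perf might
 * register for the DL marker indications exported above. The handler
 * signatures mirror the calls in rmnet_map_dl_hdr_notify_v2() and
 * rmnet_map_dl_trl_notify_v2(); the names and handler bodies are
 * placeholders.
 */
#if 0
static void example_dl_hdr_handler(struct rmnet_map_dl_ind_hdr *dlhdr,
				   struct rmnet_map_control_command_header *qcmd)
{
	pr_debug("DL marker start, seq %u\n", dlhdr->le.seq);
}

static void example_dl_trl_handler(struct rmnet_map_dl_ind_trl *dltrl,
				   struct rmnet_map_control_command_header *qcmd)
{
	pr_debug("DL marker end, seq %u\n", dltrl->seq_le);
}

static struct rmnet_map_dl_ind example_dl_ind = {
	.priority = 1,	/* lower values sit earlier in the sorted list */
	.dl_hdr_handler_v2 = example_dl_hdr_handler,
	.dl_trl_handler_v2 = example_dl_trl_handler,
};

/* ... rmnet_map_dl_ind_register(port, &example_dl_ind); */
#endif
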
int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
				struct rmnet_map_dl_ind *dl_ind)
{
	struct rmnet_map_dl_ind *tmp;

	if (!port || !dl_ind || !(port->dl_list.next))
		return -EINVAL;

	list_for_each_entry(tmp, &port->dl_list, list) {
		if (tmp == dl_ind) {
			list_del_rcu(&dl_ind->list);
			goto done;
		}
	}

done:
	return 0;
}
EXPORT_SYMBOL(rmnet_map_dl_ind_deregister);

int rmnet_map_pb_ind_register(struct rmnet_port *port,
			      struct rmnet_map_pb_ind *pb_ind)
{
	struct rmnet_map_pb_ind *pb_ind_iterator;
	bool empty_ind_list = true;

	if (!port || !pb_ind || !pb_ind->pb_ind_handler)
		return -EINVAL;

	/* Keep the handler list sorted in ascending priority order */
	list_for_each_entry_rcu(pb_ind_iterator, &port->pb_list, list) {
		empty_ind_list = false;
		if (pb_ind_iterator->priority < pb_ind->priority) {
			if (pb_ind_iterator->list.next) {
				if (pb_ind->priority <
				    list_entry_rcu(pb_ind_iterator->list.next,
						   typeof(*pb_ind_iterator),
						   list)->priority) {
					list_add_rcu(&pb_ind->list,
						     &pb_ind_iterator->list);
					break;
				}
			} else {
				list_add_rcu(&pb_ind->list,
					     &pb_ind_iterator->list);
				break;
			}
		} else {
			list_add_tail_rcu(&pb_ind->list,
					  &pb_ind_iterator->list);
			break;
		}
	}

	if (empty_ind_list)
		list_add_rcu(&pb_ind->list, &port->pb_list);

	return 0;
}
EXPORT_SYMBOL(rmnet_map_pb_ind_register);

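/* Illustrative sketch (not compiled): registering a PB marker handler
 * through the interface exported above. The handler signature mirrors the
 * call in rmnet_map_pb_ind_notify(); names and body are placeholders.
 */
#if 0
static void example_pb_ind_handler(struct rmnet_map_pb_ind_hdr *pbhdr)
{
	pr_debug("PB marker, seq %u\n", pbhdr->le.seq_num);
}

static struct rmnet_map_pb_ind example_pb_ind = {
	.priority = 1,
	.pb_ind_handler = example_pb_ind_handler,
};

/* ... rmnet_map_pb_ind_register(port, &example_pb_ind); */
#endif
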
int rmnet_map_pb_ind_deregister(struct rmnet_port *port,
				struct rmnet_map_pb_ind *pb_ind)
{
	struct rmnet_map_pb_ind *tmp;

	if (!port || !pb_ind || !(port->pb_list.next))
		return -EINVAL;

	list_for_each_entry(tmp, &port->pb_list, list) {
		if (tmp == pb_ind) {
			list_del_rcu(&pb_ind->list);
			goto done;
		}
	}

done:
	return 0;
}
EXPORT_SYMBOL(rmnet_map_pb_ind_deregister);