rmnet_map_command.c

/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"

#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \
			    sizeof(struct rmnet_map_control_command_header))

#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))
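
/* rmnet_map_do_flow_control() - Handle a MAP flow enable/disable command.
 *
 * Looks up the logical endpoint named by the MAP header's mux_id and pauses
 * or resumes transmit on the corresponding virtual device. Returns
 * RMNET_MAP_COMMAND_ACK on success so the caller can acknowledge the command;
 * on an invalid mux_id or a missing endpoint the skb is freed and no ACK is
 * sent.
 */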
static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
				    struct rmnet_port *port,
				    int enable)
{
	struct rmnet_map_header *qmap;
	struct rmnet_map_control_command *cmd;
	struct rmnet_endpoint *ep;
	struct net_device *vnd;
	u16 ip_family;
	u16 fc_seq;
	u32 qos_id;
	u8 mux_id;
	int r;

	qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
	mux_id = qmap->mux_id;
	cmd = rmnet_map_get_cmd_start(skb);

	if (mux_id >= RMNET_MAX_LOGICAL_EP) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	vnd = ep->egress_dev;

	ip_family = cmd->flow_control.ip_family;
	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
	qos_id = ntohl(cmd->flow_control.qos_id);

	/* Ignore the IP family and pass the sequence number for both v4 and
	 * v6 sequences. User space does not support creating dedicated flows
	 * for the two protocols.
	 */
	r = rmnet_vnd_do_flow_control(vnd, enable);
	if (r) {
		kfree_skb(skb);
		return RMNET_MAP_COMMAND_UNSUPPORTED;
	} else {
		return RMNET_MAP_COMMAND_ACK;
	}
}
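
/* rmnet_map_send_ack() - Echo a MAP command skb back to the modem as an ACK.
 *
 * If an ingress checksum trailer is present it is trimmed off first, then the
 * command type field is overwritten with @type and the frame is transmitted
 * directly on the real device under the tx lock.
 */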
static void rmnet_map_send_ack(struct sk_buff *skb,
			       unsigned char type,
			       struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	struct net_device *dev = skb->dev;

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
		pskb_trim(skb,
			  skb->len - sizeof(struct rmnet_map_dl_csum_trailer));

	skb->protocol = htons(ETH_P_MAP);

	cmd = rmnet_map_get_cmd_start(skb);
	cmd->cmd_type = type & 0x03;

	netif_tx_lock(dev);
	dev->netdev_ops->ndo_start_xmit(skb, dev);
	netif_tx_unlock(dev);
}
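
/* rmnet_map_dl_hdr_notify_v2() - Fan a DL marker start indication out to
 * every handler registered on @port's dl_list.
 */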
void
rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
			   struct rmnet_map_dl_ind_hdr *dlhdr,
			   struct rmnet_map_control_command_header *qcmd)
{
	struct rmnet_map_dl_ind *tmp;

	list_for_each_entry(tmp, &port->dl_list, list)
		tmp->dl_hdr_handler_v2(dlhdr, qcmd);
}
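
/* rmnet_map_dl_trl_notify_v2() - Fan a DL marker end indication out to
 * every handler registered on @port's dl_list.
 */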
void
rmnet_map_dl_trl_notify_v2(struct rmnet_port *port,
			   struct rmnet_map_dl_ind_trl *dltrl,
			   struct rmnet_map_control_command_header *qcmd)
{
	struct rmnet_map_dl_ind *tmp;

	list_for_each_entry(tmp, &port->dl_list, list)
		tmp->dl_trl_handler_v2(dltrl, qcmd);
}
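
/* rmnet_map_process_flow_start() - Parse a DL flow start marker, record its
 * fields in the port statistics, and notify registered v2 handlers.
 *
 * When @rmnet_perf is set, the marker payload (and any checksum trailer) is
 * also pulled from the skb so the rmnet_perf module can keep processing the
 * buffer.
 */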
static void rmnet_map_process_flow_start(struct sk_buff *skb,
					 struct rmnet_port *port,
					 bool rmnet_perf)
{
	struct rmnet_map_dl_ind_hdr *dlhdr;
	struct rmnet_map_control_command_header *qcmd;
	u32 data_format;
	bool is_dl_mark_v2;

	if (skb->len < RMNET_DL_IND_HDR_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	if (is_dl_mark_v2) {
		pskb_pull(skb, sizeof(struct rmnet_map_header));
		qcmd = (struct rmnet_map_control_command_header *)
		       rmnet_map_data_ptr(skb);
		port->stats.dl_hdr_last_ep_id = qcmd->source_id;
		port->stats.dl_hdr_last_qmap_vers = qcmd->reserved;
		port->stats.dl_hdr_last_trans_id = qcmd->transaction_id;
		pskb_pull(skb, sizeof(struct rmnet_map_control_command_header));
	}

	dlhdr = (struct rmnet_map_dl_ind_hdr *)rmnet_map_data_ptr(skb);

	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
	port->stats.dl_hdr_count++;

	if (is_dl_mark_v2)
		rmnet_map_dl_hdr_notify_v2(port, dlhdr, qcmd);

	if (rmnet_perf) {
		unsigned int pull_size;

		pull_size = sizeof(struct rmnet_map_dl_ind_hdr);
		if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
		pskb_pull(skb, pull_size);
	}
}
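
/* rmnet_map_process_flow_end() - Parse a DL flow end marker, record the
 * trailer sequence number in the port statistics, and notify registered v2
 * handlers. As with flow start, @rmnet_perf causes the marker payload to be
 * pulled from the skb.
 */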
static void rmnet_map_process_flow_end(struct sk_buff *skb,
				       struct rmnet_port *port,
				       bool rmnet_perf)
{
	struct rmnet_map_dl_ind_trl *dltrl;
	struct rmnet_map_control_command_header *qcmd;
	u32 data_format;
	bool is_dl_mark_v2;

	if (skb->len < RMNET_DL_IND_TRL_SIZE)
		return;

	data_format = port->data_format;
	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
	if (is_dl_mark_v2) {
		pskb_pull(skb, sizeof(struct rmnet_map_header));
		qcmd = (struct rmnet_map_control_command_header *)
		       rmnet_map_data_ptr(skb);
		pskb_pull(skb, sizeof(struct rmnet_map_control_command_header));
	}

	dltrl = (struct rmnet_map_dl_ind_trl *)rmnet_map_data_ptr(skb);

	port->stats.dl_trl_last_seq = dltrl->seq_le;
	port->stats.dl_trl_count++;

	if (is_dl_mark_v2)
		rmnet_map_dl_trl_notify_v2(port, dltrl, qcmd);

	if (rmnet_perf) {
		unsigned int pull_size;

		pull_size = sizeof(struct rmnet_map_dl_ind_trl);
		if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
		pskb_pull(skb, pull_size);
	}
}

/* Process a MAP command frame and send an ACK/NACK message as appropriate.
 * The command name is decoded here and the appropriate handler is called.
 */
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	unsigned char command_name;
	unsigned char rc = 0;

	cmd = rmnet_map_get_cmd_start(skb);
	command_name = cmd->command_name;

	switch (command_name) {
	case RMNET_MAP_COMMAND_FLOW_ENABLE:
		rc = rmnet_map_do_flow_control(skb, port, 1);
		break;

	case RMNET_MAP_COMMAND_FLOW_DISABLE:
		rc = rmnet_map_do_flow_control(skb, port, 0);
		break;

	default:
		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
		kfree_skb(skb);
		break;
	}

	if (rc == RMNET_MAP_COMMAND_ACK)
		rmnet_map_send_ack(skb, rc, port);
}
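
/* rmnet_map_flow_command() - Dispatch DL marker start/end commands.
 *
 * Returns 0 if the command was a flow start/end marker (the skb is consumed
 * here unless @rmnet_perf owns it) and 1 for any other command, leaving the
 * skb for the caller to handle.
 */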
int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port,
			   bool rmnet_perf)
{
	struct rmnet_map_control_command *cmd;
	unsigned char command_name;

	cmd = rmnet_map_get_cmd_start(skb);
	command_name = cmd->command_name;

	/* Silently discard any DL markers received on the LL channel */
	if (skb->priority == 0xda1a &&
	    (command_name == RMNET_MAP_COMMAND_FLOW_START ||
	     command_name == RMNET_MAP_COMMAND_FLOW_END)) {
		if (!rmnet_perf)
			consume_skb(skb);

		return 0;
	}

	switch (command_name) {
	case RMNET_MAP_COMMAND_FLOW_START:
		rmnet_map_process_flow_start(skb, port, rmnet_perf);
		break;

	case RMNET_MAP_COMMAND_FLOW_END:
		rmnet_map_process_flow_end(skb, port, rmnet_perf);
		break;

	default:
		return 1;
	}

	/* The rmnet_perf module will consume the skb itself */
	if (!rmnet_perf)
		consume_skb(skb);

	return 0;
}
EXPORT_SYMBOL(rmnet_map_flow_command);
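
/* rmnet_map_cmd_exit() - Unlink all registered DL marker handlers from
 * @port's list on teardown.
 */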
void rmnet_map_cmd_exit(struct rmnet_port *port)
{
	struct rmnet_map_dl_ind *tmp, *idx;

	list_for_each_entry_safe(tmp, idx, &port->dl_list, list)
		list_del_rcu(&tmp->list);
}
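
/* rmnet_map_cmd_init() - Initialize @port's DL marker handler list. */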
void rmnet_map_cmd_init(struct rmnet_port *port)
{
	INIT_LIST_HEAD(&port->dl_list);
}
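
/* rmnet_map_dl_ind_register() - Add a DL marker handler to @port's list.
 *
 * The list is kept sorted in ascending priority-value order, so handlers
 * with lower priority values are notified first. Both v2 callbacks must be
 * provided. A minimal sketch of a caller, with a hypothetical handler and
 * priority value chosen purely for illustration:
 *
 *	static struct rmnet_map_dl_ind my_dl_ind = {
 *		.priority = 1,
 *		.dl_hdr_handler_v2 = my_dl_hdr_cb,
 *		.dl_trl_handler_v2 = my_dl_trl_cb,
 *	};
 *
 *	rmnet_map_dl_ind_register(port, &my_dl_ind);
 */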
int rmnet_map_dl_ind_register(struct rmnet_port *port,
			      struct rmnet_map_dl_ind *dl_ind)
{
	struct rmnet_map_dl_ind *dl_ind_iterator;
	bool empty_ind_list = true;

	if (!port || !dl_ind || !dl_ind->dl_hdr_handler_v2 ||
	    !dl_ind->dl_trl_handler_v2)
		return -EINVAL;

	list_for_each_entry_rcu(dl_ind_iterator, &port->dl_list, list) {
		empty_ind_list = false;
		if (dl_ind_iterator->priority < dl_ind->priority) {
			if (dl_ind_iterator->list.next) {
				if (dl_ind->priority
				    < list_entry_rcu(dl_ind_iterator->list.next,
						     typeof(*dl_ind_iterator),
						     list)->priority) {
					list_add_rcu(&dl_ind->list,
						     &dl_ind_iterator->list);
					break;
				}
			} else {
				list_add_rcu(&dl_ind->list,
					     &dl_ind_iterator->list);
				break;
			}
		} else {
			list_add_tail_rcu(&dl_ind->list,
					  &dl_ind_iterator->list);
			break;
		}
	}

	if (empty_ind_list)
		list_add_rcu(&dl_ind->list, &port->dl_list);

	return 0;
}
EXPORT_SYMBOL(rmnet_map_dl_ind_register);
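
/* rmnet_map_dl_ind_deregister() - Remove @dl_ind from @port's handler list.
 * Returns 0 even if the handler was never registered.
 */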
int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
				struct rmnet_map_dl_ind *dl_ind)
{
	struct rmnet_map_dl_ind *tmp;

	if (!port || !dl_ind)
		return -EINVAL;

	list_for_each_entry(tmp, &port->dl_list, list) {
		if (tmp == dl_ind) {
			list_del_rcu(&dl_ind->list);
			goto done;
		}
	}

done:
	return 0;
}
EXPORT_SYMBOL(rmnet_map_dl_ind_deregister);