rmnet_handlers.c

/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data ingress/egress handler
 *
 */

#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sock.h>
#include <linux/tracepoint.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"
#include "rmnet_descriptor.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"

#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60

#define CREATE_TRACE_POINTS
#include "rmnet_trace.h"
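
/* Export the rmnet tracepoints defined in rmnet_trace.h so that external
 * modules (e.g. rmnet_shs and rmnet_perf, which hook in below) can attach
 * probes to them.
 */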
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_update);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_reset);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_boost);

/* Helper Functions */

void rmnet_set_skb_proto(struct sk_buff *skb)
{
	switch (rmnet_map_data_ptr(skb)[0] & 0xF0) {
	case RMNET_IP_VERSION_4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case RMNET_IP_VERSION_6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}
EXPORT_SYMBOL(rmnet_set_skb_proto);
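
/* Optional hook installed by the SHS module. rmnet_slow_start_on() reports
 * whether the flow identified by hash_key is currently in slow start; it
 * returns false when no module has registered the hook.
 */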
bool (*rmnet_shs_slow_start_detect)(u32 hash_key) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_slow_start_detect);

bool rmnet_slow_start_on(u32 hash_key)
{
	bool (*rmnet_shs_slow_start_on)(u32 hash_key);

	rmnet_shs_slow_start_on = rcu_dereference(rmnet_shs_slow_start_detect);
	if (rmnet_shs_slow_start_on)
		return rmnet_shs_slow_start_on(hash_key);

	return false;
}
EXPORT_SYMBOL(rmnet_slow_start_on);

/* Shs hook handler */
int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
			   struct rmnet_port *port) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_skb_entry);

/* Shs hook handler for work queue */
int (*rmnet_shs_skb_entry_wq)(struct sk_buff *skb,
			      struct rmnet_port *port) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_skb_entry_wq);

/* Generic handler */
void
rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
{
	int (*rmnet_shs_stamp)(struct sk_buff *skb, struct rmnet_port *port);

	trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
			0xDEF, 0xDEF, (void *)skb, NULL);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	rmnet_vnd_rx_fixup(skb->dev, skb->len);

	skb->pkt_type = PACKET_HOST;
	skb_set_mac_header(skb, 0);

	rcu_read_lock();
	rmnet_shs_stamp = rcu_dereference(rmnet_shs_skb_entry);
	if (rmnet_shs_stamp) {
		rmnet_shs_stamp(skb, port);
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	netif_receive_skb(skb);
}
EXPORT_SYMBOL(rmnet_deliver_skb);

/* Important to note, port cannot be used here if it has gone stale */
void
rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
		     enum rmnet_packet_context ctx)
{
	int (*rmnet_shs_stamp)(struct sk_buff *skb, struct rmnet_port *port);
	struct rmnet_priv *priv = netdev_priv(skb->dev);

	trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
			0xDEF, 0xDEF, (void *)skb, NULL);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	rmnet_vnd_rx_fixup(skb->dev, skb->len);

	skb->pkt_type = PACKET_HOST;
	skb_set_mac_header(skb, 0);

	/* Packets coming from work queue context due to packet flush timer
	 * must go through the special workqueue path in SHS driver.
	 */
	rcu_read_lock();
	rmnet_shs_stamp = (!ctx) ? rcu_dereference(rmnet_shs_skb_entry) :
				   rcu_dereference(rmnet_shs_skb_entry_wq);
	if (rmnet_shs_stamp) {
		rmnet_shs_stamp(skb, port);
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	if (ctx == RMNET_NET_RX_CTX)
		netif_receive_skb(skb);
	else
		gro_cells_receive(&priv->gro_cells, skb);
}
EXPORT_SYMBOL(rmnet_deliver_skb_wq);

/* Deliver a list of skbs after undoing coalescing */
static void rmnet_deliver_skb_list(struct sk_buff_head *head,
				   struct rmnet_port *port)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(head))) {
		rmnet_set_skb_proto(skb);
		rmnet_deliver_skb(skb, port);
	}
}

/* MAP handler */
static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_map_header *qmap;
	struct rmnet_endpoint *ep;
	struct sk_buff_head list;
	u16 len, pad;
	u8 mux_id;

	/* We don't need the spinlock since only we touch this */
	__skb_queue_head_init(&list);

	qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
	if (qmap->cd_bit) {
		qmi_rmnet_set_dl_msg_active(port);
		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
			if (!rmnet_map_flow_command(skb, port, false))
				return;
		}

		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		goto free_skb;
	}

	mux_id = qmap->mux_id;
	pad = qmap->pad_len;
	len = ntohs(qmap->pkt_len) - pad;

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto free_skb;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto free_skb;

	skb->dev = ep->egress_dev;

	/* Handle QMAPv5 packet */
	if (qmap->next_hdr &&
	    (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
				  RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
		if (rmnet_map_process_next_hdr_packet(skb, &list, len))
			goto free_skb;
	} else {
		/* We only have the main QMAP header to worry about */
		pskb_pull(skb, sizeof(*qmap));

		rmnet_set_skb_proto(skb);

		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
			if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		pskb_trim(skb, len);

		/* Push the single packet onto the list */
		__skb_queue_tail(&list, skb);
	}

	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port);

	rmnet_deliver_skb_list(&list, port);
	return;

free_skb:
	kfree_skb(skb);
}
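
/* Optional deaggregation hook. When the rmnet_perf module is loaded it
 * registers its deaggregation entry point here and takes over handling of
 * aggregated MAP frames in rmnet_map_ingress_handler() below.
 */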
int (*rmnet_perf_deag_entry)(struct sk_buff *skb,
			     struct rmnet_port *port) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_deag_entry);

static void
rmnet_map_ingress_handler(struct sk_buff *skb,
			  struct rmnet_port *port)
{
	struct sk_buff *skbn;
	int (*rmnet_perf_core_deaggregate)(struct sk_buff *skb,
					   struct rmnet_port *port);

	if (skb->dev->type == ARPHRD_ETHER) {
		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
			kfree_skb(skb);
			return;
		}

		skb_push(skb, ETH_HLEN);
	}

	if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
				 RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) {
		if (skb_is_nonlinear(skb)) {
			rmnet_frag_ingress_handler(skb, port);
			return;
		}
	}

	/* No aggregation. Pass the frame on as is */
	if (!(port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION)) {
		__rmnet_map_ingress_handler(skb, port);
		return;
	}

	/* Pass off handling to rmnet_perf module, if present */
	rcu_read_lock();
	rmnet_perf_core_deaggregate = rcu_dereference(rmnet_perf_deag_entry);
	if (rmnet_perf_core_deaggregate) {
		rmnet_perf_core_deaggregate(skb, port);
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* Deaggregation and freeing of HW originating
	 * buffers is done within here
	 */
	while (skb) {
		struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;

		skb_shinfo(skb)->frag_list = NULL;
		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
			__rmnet_map_ingress_handler(skbn, port);

			if (skbn == skb)
				goto next_skb;
		}

		consume_skb(skb);
next_skb:
		skb = skb_frag;
	}
}
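
/* Prepends the MAP header (and, depending on the egress data format, a
 * v4 or v5 checksum header) for transmit. Returns 0 when the skb is ready
 * to be queued on the real device, -EINPROGRESS when it has been handed to
 * the TX aggregation path, or -ENOMEM if headroom could not be expanded or
 * the MAP header could not be added.
 */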
static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_port *port, u8 mux_id,
				    struct net_device *orig_dev)
{
	int required_headroom, additional_header_len, csum_type;
	struct rmnet_map_header *map_header;

	additional_header_len = 0;
	required_headroom = sizeof(struct rmnet_map_header);
	csum_type = 0;

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
	} else if ((port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) ||
		   (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)) {
		additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
	}

	required_headroom += additional_header_len;

	if (skb_headroom(skb) < required_headroom) {
		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
			return -ENOMEM;
	}

	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port);

	if (csum_type)
		rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
						 csum_type);

	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0,
					      port);
	if (!map_header)
		return -ENOMEM;

	map_header->mux_id = mux_id;

	if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
		if (rmnet_map_tx_agg_skip(skb, required_headroom))
			goto done;

		rmnet_map_tx_aggregate(skb, port);
		return -EINPROGRESS;
	}

done:
	skb->protocol = htons(ETH_P_MAP);
	return 0;
}
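
/* In bridge mode, frames are forwarded unmodified to the bridged device. */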
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
	if (bridge_dev) {
		skb->dev = bridge_dev;
		dev_queue_xmit(skb);
	}
}

/* Ingress / Egress Entry Points */

/* Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 */
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct rmnet_port *port;
	struct net_device *dev;

	if (!skb)
		goto done;

	if (skb->pkt_type == PACKET_LOOPBACK)
		return RX_HANDLER_PASS;

	trace_rmnet_low(RMNET_MODULE, RMNET_RCV_FROM_PND, 0xDEF,
			0xDEF, 0xDEF, 0xDEF, NULL, NULL);
	dev = skb->dev;
	port = rmnet_get_port(dev);

	switch (port->rmnet_mode) {
	case RMNET_EPMODE_VND:
		rmnet_map_ingress_handler(skb, port);
		break;
	case RMNET_EPMODE_BRIDGE:
		rmnet_bridge_handler(skb, port->bridge_ep);
		break;
	}

done:
	return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL(rmnet_rx_handler);

/* Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
void rmnet_egress_handler(struct sk_buff *skb)
{
	struct net_device *orig_dev;
	struct rmnet_port *port;
	struct rmnet_priv *priv;
	u8 mux_id;
	int err;
	u32 skb_len;

	trace_rmnet_low(RMNET_MODULE, RMNET_TX_UL_PKT, 0xDEF, 0xDEF, 0xDEF,
			0xDEF, (void *)skb, NULL);
	sk_pacing_shift_update(skb->sk, 8);

	orig_dev = skb->dev;
	priv = netdev_priv(orig_dev);
	skb->dev = priv->real_dev;
	mux_id = priv->mux_id;

	port = rmnet_get_port(skb->dev);
	if (!port)
		goto drop;

	skb_len = skb->len;
	err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
	if (err == -ENOMEM) {
		goto drop;
	} else if (err == -EINPROGRESS) {
		rmnet_vnd_tx_fixup(orig_dev, skb_len);
		return;
	}

	rmnet_vnd_tx_fixup(orig_dev, skb_len);

	dev_queue_xmit(skb);
	return;

drop:
	this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
	kfree_skb(skb);
}