rmnet_handlers.c

/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data ingress/egress handler
 *
 */

#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sock.h>
#include <linux/tracepoint.h>

#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"
#include "rmnet_descriptor.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"

#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60

#define CREATE_TRACE_POINTS
#include "rmnet_trace.h"

EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_update);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_reset);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_boost);

/* Helper Functions */
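/* Set skb->protocol based on the IP version nibble of the first payload
 * byte: 4 maps to ETH_P_IP, 6 to ETH_P_IPV6, and anything else is left as
 * raw MAP traffic (ETH_P_MAP).
 */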
void rmnet_set_skb_proto(struct sk_buff *skb)
{
        switch (rmnet_map_data_ptr(skb)[0] & 0xF0) {
        case RMNET_IP_VERSION_4:
                skb->protocol = htons(ETH_P_IP);
                break;
        case RMNET_IP_VERSION_6:
                skb->protocol = htons(ETH_P_IPV6);
                break;
        default:
                skb->protocol = htons(ETH_P_MAP);
                break;
        }
}
EXPORT_SYMBOL(rmnet_set_skb_proto);
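/* RCU-protected hook point, intended to be filled in by the external
 * rmnet_shs module, for asking whether the flow identified by hash_key is
 * currently in slow start. rmnet_slow_start_on() returns false whenever no
 * handler is registered. An external module would presumably install its
 * handler with something like
 * rcu_assign_pointer(rmnet_shs_slow_start_detect, my_detect_fn).
 */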
bool (*rmnet_shs_slow_start_detect)(u32 hash_key) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_slow_start_detect);

bool rmnet_slow_start_on(u32 hash_key)
{
        bool (*rmnet_shs_slow_start_on)(u32 hash_key);

        rmnet_shs_slow_start_on = rcu_dereference(rmnet_shs_slow_start_detect);
        if (rmnet_shs_slow_start_on)
                return rmnet_shs_slow_start_on(hash_key);

        return false;
}
EXPORT_SYMBOL(rmnet_slow_start_on);

/* Shs hook handler */
int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
                           struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_skb_entry);

int (*rmnet_shs_switch)(struct sk_buff *skb,
                        struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_switch);

/* Shs hook handler for work queue */
int (*rmnet_shs_skb_entry_wq)(struct sk_buff *skb,
                              struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_skb_entry_wq);

/* Generic handler */
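/* rmnet_deliver_skb() - hand a deaggregated MAP payload to the stack.
 * Resets the header offsets, runs the VND rx fixup, and gives the
 * registered SHS entry hook first claim on the packet; only when no hook
 * is installed does the skb go straight to netif_receive_skb().
 */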
void
rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
{
        int (*rmnet_shs_stamp)(struct sk_buff *skb,
                               struct rmnet_shs_clnt_s *cfg);

        trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
                        0xDEF, 0xDEF, (void *)skb, NULL);
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
        rmnet_vnd_rx_fixup(skb->dev, skb->len);

        skb->pkt_type = PACKET_HOST;
        skb_set_mac_header(skb, 0);

        rcu_read_lock();
        rmnet_shs_stamp = rcu_dereference(rmnet_shs_skb_entry);
        if (rmnet_shs_stamp) {
                rmnet_shs_stamp(skb, &port->shs_cfg);
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        netif_receive_skb(skb);
}
EXPORT_SYMBOL(rmnet_deliver_skb);

/* Important to note, port cannot be used here if it has gone stale */
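/* Workqueue-context variant of rmnet_deliver_skb(). Packets flushed from a
 * context other than RMNET_NET_RX_CTX are steered through the SHS
 * workqueue entry hook and, when no hook is installed, received via
 * gro_cells rather than netif_receive_skb().
 */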
void
rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
                     enum rmnet_packet_context ctx)
{
        int (*rmnet_shs_stamp)(struct sk_buff *skb,
                               struct rmnet_shs_clnt_s *cfg);
        struct rmnet_priv *priv = netdev_priv(skb->dev);

        trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
                        0xDEF, 0xDEF, (void *)skb, NULL);
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
        rmnet_vnd_rx_fixup(skb->dev, skb->len);

        skb->pkt_type = PACKET_HOST;
        skb_set_mac_header(skb, 0);

        /* packets coming from work queue context due to packet flush timer
         * must go through the special workqueue path in SHS driver
         */
        rcu_read_lock();
        rmnet_shs_stamp = (!ctx) ? rcu_dereference(rmnet_shs_skb_entry) :
                                   rcu_dereference(rmnet_shs_skb_entry_wq);
        if (rmnet_shs_stamp) {
                rmnet_shs_stamp(skb, &port->shs_cfg);
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        if (ctx == RMNET_NET_RX_CTX)
                netif_receive_skb(skb);
        else
                gro_cells_receive(&priv->gro_cells, skb);
}
EXPORT_SYMBOL(rmnet_deliver_skb_wq);

/* Deliver a list of skbs after undoing coalescing */
static void rmnet_deliver_skb_list(struct sk_buff_head *head,
                                   struct rmnet_port *port)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(head))) {
                rmnet_set_skb_proto(skb);
                rmnet_deliver_skb(skb, port);
        }
}

/* MAP handler */
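/* Handle a single, already deaggregated MAP frame: command frames go to
 * the flow-control / command handlers, data frames are matched to their
 * logical endpoint by mux_id, stripped of the MAP (and, if present, the
 * QMAPv5 next) header, annotated for checksum offload when enabled, and
 * then delivered to the stack.
 */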
static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
                            struct rmnet_port *port)
{
        struct rmnet_map_header *qmap;
        struct rmnet_endpoint *ep;
        struct sk_buff_head list;
        u16 len, pad;
        u8 mux_id;

        /* We don't need the spinlock since only we touch this */
        __skb_queue_head_init(&list);

        qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
        if (qmap->cd_bit) {
                qmi_rmnet_set_dl_msg_active(port);
                if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
                        if (!rmnet_map_flow_command(skb, port, false))
                                return;
                }

                if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
                        return rmnet_map_command(skb, port);

                goto free_skb;
        }

        mux_id = qmap->mux_id;
        pad = qmap->pad_len;
        len = ntohs(qmap->pkt_len) - pad;

        if (mux_id >= RMNET_MAX_LOGICAL_EP)
                goto free_skb;

        ep = rmnet_get_endpoint(port, mux_id);
        if (!ep)
                goto free_skb;

        skb->dev = ep->egress_dev;

        /* Handle QMAPv5 packet */
        if (qmap->next_hdr &&
            (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
                                  RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
                if (rmnet_map_process_next_hdr_packet(skb, &list, len))
                        goto free_skb;
        } else {
                /* We only have the main QMAP header to worry about */
                pskb_pull(skb, sizeof(*qmap));

                rmnet_set_skb_proto(skb);

                if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
                        if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

                pskb_trim(skb, len);

                /* Push the single packet onto the list */
                __skb_queue_tail(&list, skb);
        }

        if (port->data_format & RMNET_INGRESS_FORMAT_PS)
                qmi_rmnet_work_maybe_restart(port);

        rmnet_deliver_skb_list(&list, port);
        return;

free_skb:
        kfree_skb(skb);
}

int (*rmnet_perf_deag_entry)(struct sk_buff *skb,
                             struct rmnet_port *port) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_deag_entry);
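/* Top-level MAP ingress path. Pushes the link-layer header back on for
 * ARPHRD_ETHER physical devices, diverts nonlinear coalesced/QMAPv5
 * traffic to the frag (descriptor) path, and otherwise deaggregates the
 * buffer into individual MAP frames for __rmnet_map_ingress_handler().
 * An installed rmnet_perf hook may take over deaggregation entirely.
 */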
static void
rmnet_map_ingress_handler(struct sk_buff *skb,
                          struct rmnet_port *port)
{
        struct sk_buff *skbn;
        int (*rmnet_perf_core_deaggregate)(struct sk_buff *skb,
                                           struct rmnet_port *port);

        if (skb->dev->type == ARPHRD_ETHER) {
                if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
                        kfree_skb(skb);
                        return;
                }

                skb_push(skb, ETH_HLEN);
        }

        if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
                                 RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) {
                if (skb_is_nonlinear(skb)) {
                        rmnet_frag_ingress_handler(skb, port);
                        return;
                }
        }

        /* No aggregation. Pass the frame on as is */
        if (!(port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION)) {
                __rmnet_map_ingress_handler(skb, port);
                return;
        }

        /* Pass off handling to rmnet_perf module, if present */
        rcu_read_lock();
        rmnet_perf_core_deaggregate = rcu_dereference(rmnet_perf_deag_entry);
        if (rmnet_perf_core_deaggregate) {
                rmnet_perf_core_deaggregate(skb, port);
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        /* Deaggregation and freeing of HW originating
         * buffers is done within here
         */
        while (skb) {
                struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;

                skb_shinfo(skb)->frag_list = NULL;
                while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
                        __rmnet_map_ingress_handler(skbn, port);

                        if (skbn == skb)
                                goto next_skb;
                }

                consume_skb(skb);
next_skb:
                skb = skb_frag;
        }
}
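/* Prepend the MAP header (and, depending on the egress data format, a v4
 * or v5 checksum-offload header) for the given mux_id, expanding headroom
 * if needed. Returns 0 when the skb is ready to transmit, -ENOMEM on
 * allocation failure, and -EINPROGRESS when the skb has been queued for
 * uplink aggregation and will be sent later.
 */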
static int rmnet_map_egress_handler(struct sk_buff *skb,
                                    struct rmnet_port *port, u8 mux_id,
                                    struct net_device *orig_dev)
{
        int required_headroom, additional_header_len, csum_type;
        struct rmnet_map_header *map_header;

        additional_header_len = 0;
        required_headroom = sizeof(struct rmnet_map_header);
        csum_type = 0;

        if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
                additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
                csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
        } else if ((port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) ||
                   (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)) {
                additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
                csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
        }

        required_headroom += additional_header_len;

        if (skb_headroom(skb) < required_headroom) {
                if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
                        return -ENOMEM;
        }

        if (port->data_format & RMNET_INGRESS_FORMAT_PS)
                qmi_rmnet_work_maybe_restart(port);

        if (csum_type)
                rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
                                                 csum_type);

        map_header = rmnet_map_add_map_header(skb, additional_header_len, 0,
                                              port);
        if (!map_header)
                return -ENOMEM;

        map_header->mux_id = mux_id;

        if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
                if (rmnet_map_tx_agg_skip(skb, required_headroom))
                        goto done;

                rmnet_map_tx_aggregate(skb, port);
                return -EINPROGRESS;
        }

done:
        skb->protocol = htons(ETH_P_MAP);
        return 0;
}
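/* Bridge mode: forward the frame unmodified to the bridged device */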
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
        if (bridge_dev) {
                skb->dev = bridge_dev;
                dev_queue_xmit(skb);
        }
}

/* Ingress / Egress Entry Points */

/* Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 */
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct rmnet_port *port;
        struct net_device *dev;
        int (*rmnet_core_shs_switch)(struct sk_buff *skb,
                                     struct rmnet_shs_clnt_s *cfg);

        if (!skb)
                goto done;

        if (skb->pkt_type == PACKET_LOOPBACK)
                return RX_HANDLER_PASS;

        trace_rmnet_low(RMNET_MODULE, RMNET_RCV_FROM_PND, 0xDEF,
                        0xDEF, 0xDEF, 0xDEF, NULL, NULL);

        dev = skb->dev;
        port = rmnet_get_port(dev);
        if (unlikely(!port)) {
                atomic_long_inc(&skb->dev->rx_nohandler);
                kfree_skb(skb);
                goto done;
        }

        switch (port->rmnet_mode) {
        case RMNET_EPMODE_VND:
                rcu_read_lock();
                rmnet_core_shs_switch = rcu_dereference(rmnet_shs_switch);
                if (rmnet_core_shs_switch && !skb->cb[1]) {
                        skb->cb[1] = 1;
                        rmnet_core_shs_switch(skb, &port->phy_shs_cfg);
                        rcu_read_unlock();
                        return RX_HANDLER_CONSUMED;
                }
                rcu_read_unlock();

                rmnet_map_ingress_handler(skb, port);
                break;
        case RMNET_EPMODE_BRIDGE:
                rmnet_bridge_handler(skb, port->bridge_ep);
                break;
        }

done:
        return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL(rmnet_rx_handler);
/* Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
void rmnet_egress_handler(struct sk_buff *skb)
{
        struct net_device *orig_dev;
        struct rmnet_port *port;
        struct rmnet_priv *priv;
        u8 mux_id;
        int err;
        u32 skb_len;

        trace_rmnet_low(RMNET_MODULE, RMNET_TX_UL_PKT, 0xDEF, 0xDEF, 0xDEF,
                        0xDEF, (void *)skb, NULL);
        sk_pacing_shift_update(skb->sk, 8);

        orig_dev = skb->dev;
        priv = netdev_priv(orig_dev);
        skb->dev = priv->real_dev;
        mux_id = priv->mux_id;

        port = rmnet_get_port(skb->dev);
        if (!port)
                goto drop;

        skb_len = skb->len;
        err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
        if (err == -ENOMEM) {
                goto drop;
        } else if (err == -EINPROGRESS) {
                rmnet_vnd_tx_fixup(orig_dev, skb_len);
                return;
        }

        rmnet_vnd_tx_fixup(orig_dev, skb_len);

        dev_queue_xmit(skb);
        return;

drop:
        this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
        kfree_skb(skb);
}