rmnet_handlers.c

/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data ingress/egress handler
 *
 */
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/inet.h>
#include <net/sock.h>
#include <linux/tracepoint.h>
#include <linux/version.h>	/* KERNEL_VERSION()/LINUX_VERSION_CODE check in rmnet_rx_handler() */

#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"
#include "rmnet_descriptor.h"
#include "rmnet_ll.h"
#include "rmnet_module.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"

#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60

#define CREATE_TRACE_POINTS
#include "rmnet_trace.h"
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_update);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_reset);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_boost);
EXPORT_TRACEPOINT_SYMBOL(print_icmp_rx);

/* Helper Functions */
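
/* Set skb->protocol based on the IP version nibble at the start of the MAP
 * payload: 0x4X is treated as IPv4, 0x6X as IPv6, and anything else is left
 * as a raw MAP frame (ETH_P_MAP).
 */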
void rmnet_set_skb_proto(struct sk_buff *skb)
{
        switch (rmnet_map_data_ptr(skb)[0] & 0xF0) {
        case RMNET_IP_VERSION_4:
                skb->protocol = htons(ETH_P_IP);
                break;
        case RMNET_IP_VERSION_6:
                skb->protocol = htons(ETH_P_IPV6);
                break;
        default:
                skb->protocol = htons(ETH_P_MAP);
                break;
        }
}
EXPORT_SYMBOL(rmnet_set_skb_proto);

bool (*rmnet_shs_slow_start_detect)(u32 hash_key) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_slow_start_detect);
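
/* Ask the SHS module, if one has registered rmnet_shs_slow_start_detect,
 * whether the flow identified by hash_key is currently in slow start.
 * Returns false when no hook is registered.
 */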
bool rmnet_slow_start_on(u32 hash_key)
{
        bool (*rmnet_shs_slow_start_on)(u32 hash_key);

        rmnet_shs_slow_start_on = rcu_dereference(rmnet_shs_slow_start_detect);
        if (rmnet_shs_slow_start_on)
                return rmnet_shs_slow_start_on(hash_key);

        return false;
}
EXPORT_SYMBOL(rmnet_slow_start_on);

/* Shs hook handler */
int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
                           struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_skb_entry);

int (*rmnet_shs_switch)(struct sk_buff *skb,
                        struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_switch);

/* Shs hook handler for work queue */
int (*rmnet_shs_skb_entry_wq)(struct sk_buff *skb,
                              struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_skb_entry_wq);

/* Generic handler */
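/* Deliver a decapsulated packet to the network stack. Low latency packets
 * (skb->priority == 0xda1a) bypass the regular SHS entry hook; if the low
 * latency SHS hook does not claim them either, they go straight to
 * netif_receive_skb().
 */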
void
rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
{
        int (*rmnet_shs_stamp)(struct sk_buff *skb,
                               struct rmnet_shs_clnt_s *cfg);

        trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
                        0xDEF, 0xDEF, (void *)skb, NULL);
        skb_reset_network_header(skb);
        rmnet_vnd_rx_fixup(skb->dev, skb->len);

        skb->pkt_type = PACKET_HOST;
        skb_set_mac_header(skb, 0);

        /* Low latency packets use a different balancing scheme */
        if (skb->priority == 0xda1a)
                goto skip_shs;

        rcu_read_lock();
        rmnet_shs_stamp = rcu_dereference(rmnet_shs_skb_entry);
        if (rmnet_shs_stamp) {
                rmnet_shs_stamp(skb, &port->shs_cfg);
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

skip_shs:
        if (rmnet_module_hook_shs_skb_ll_entry(NULL, skb, &port->shs_cfg))
                return;

        netif_receive_skb(skb);
}
EXPORT_SYMBOL(rmnet_deliver_skb);

/* Important to note, port cannot be used here if it has gone stale */
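/* Work queue variant of rmnet_deliver_skb(). Packets flushed from work queue
 * context (ctx != RMNET_NET_RX_CTX) are steered through the SHS work queue
 * hook and, failing that, through the device's GRO cells instead of
 * netif_receive_skb().
 */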
void
rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
                     enum rmnet_packet_context ctx)
{
        int (*rmnet_shs_stamp)(struct sk_buff *skb,
                               struct rmnet_shs_clnt_s *cfg);
        struct rmnet_priv *priv = netdev_priv(skb->dev);

        trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
                        0xDEF, 0xDEF, (void *)skb, NULL);
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
        rmnet_vnd_rx_fixup(skb->dev, skb->len);

        skb->pkt_type = PACKET_HOST;
        skb_set_mac_header(skb, 0);

        /* packets coming from work queue context due to packet flush timer
         * must go through the special workqueue path in SHS driver
         */
        rcu_read_lock();
        rmnet_shs_stamp = (!ctx) ? rcu_dereference(rmnet_shs_skb_entry) :
                                   rcu_dereference(rmnet_shs_skb_entry_wq);
        if (rmnet_shs_stamp) {
                rmnet_shs_stamp(skb, &port->shs_cfg);
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        if (ctx == RMNET_NET_RX_CTX)
                netif_receive_skb(skb);
        else
                gro_cells_receive(&priv->gro_cells, skb);
}
EXPORT_SYMBOL(rmnet_deliver_skb_wq);

/* Deliver a list of skbs after undoing coalescing */
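/* Each segment dequeued from the list is tagged with its protocol and handed
 * to rmnet_deliver_skb() individually.
 */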
static void rmnet_deliver_skb_list(struct sk_buff_head *head,
                                   struct rmnet_port *port)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(head))) {
                rmnet_set_skb_proto(skb);
                rmnet_deliver_skb(skb, port);
        }
}

/* MAP handler */
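/* Process a single, already de-aggregated MAP frame. Command frames (cd_bit
 * set) are routed to the DL marker / flow command handlers; data frames are
 * mapped to a logical endpoint by mux_id, optionally run through QMAPv5
 * next-header or MAPv4 checksum offload handling, trimmed to the MAP payload
 * length, and delivered to the stack.
 */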
static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
                            struct rmnet_port *port)
{
        struct rmnet_map_header *qmap;
        struct rmnet_endpoint *ep;
        struct sk_buff_head list;
        u16 len, pad;
        u8 mux_id;

        /* We don't need the spinlock since only we touch this */
        __skb_queue_head_init(&list);

        qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
        if (qmap->cd_bit) {
                qmi_rmnet_set_dl_msg_active(port);
                if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
                        if (!rmnet_map_flow_command(skb, port, false))
                                return;
                }

                if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
                        return rmnet_map_command(skb, port);

                goto free_skb;
        }

        mux_id = qmap->mux_id;
        pad = qmap->pad_len;
        len = ntohs(qmap->pkt_len) - pad;

        if (mux_id >= RMNET_MAX_LOGICAL_EP)
                goto free_skb;

        ep = rmnet_get_endpoint(port, mux_id);
        if (!ep)
                goto free_skb;

        skb->dev = ep->egress_dev;

        /* Handle QMAPv5 packet */
        if (qmap->next_hdr &&
            (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
                                  RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5))) {
                if (rmnet_map_process_next_hdr_packet(skb, &list, len))
                        goto free_skb;
        } else {
                /* We only have the main QMAP header to worry about */
                pskb_pull(skb, sizeof(*qmap));

                rmnet_set_skb_proto(skb);

                if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
                        if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

                pskb_trim(skb, len);

                /* Push the single packet onto the list */
                __skb_queue_tail(&list, skb);
        }

        if (port->data_format & RMNET_INGRESS_FORMAT_PS)
                qmi_rmnet_work_maybe_restart(port, NULL, skb_peek(&list));

        rmnet_deliver_skb_list(&list, port);
        return;

free_skb:
        kfree_skb(skb);
}

int (*rmnet_perf_deag_entry)(struct sk_buff *skb,
                             struct rmnet_port *port) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_deag_entry);
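
/* Top-level MAP ingress path. For Ethernet-type physical devices the
 * headroom is expanded and ETH_HLEN is pushed back onto the buffer first.
 * Nonlinear packets in coalescing / QMAPv5 checksum modes are diverted to
 * the frag descriptor path. When de-aggregation is enabled, frames are
 * either handed to the rmnet_perf module, if one is registered, or
 * de-aggregated here one MAP frame at a time; low latency traffic
 * (priority 0xda1a) always takes the local de-aggregation loop.
 */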
static void
rmnet_map_ingress_handler(struct sk_buff *skb,
                          struct rmnet_port *port)
{
        struct sk_buff *skbn;
        int (*rmnet_perf_core_deaggregate)(struct sk_buff *skb,
                                           struct rmnet_port *port);

        if (skb->dev->type == ARPHRD_ETHER) {
                if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
                        kfree_skb(skb);
                        return;
                }

                skb_push(skb, ETH_HLEN);
        }

        if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
                                 RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5)) {
                if (skb_is_nonlinear(skb)) {
                        rmnet_frag_ingress_handler(skb, port);
                        return;
                }
        }

        /* No aggregation. Pass the frame on as is */
        if (!(port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION)) {
                __rmnet_map_ingress_handler(skb, port);
                return;
        }

        if (skb->priority == 0xda1a)
                goto no_perf;

        /* Pass off handling to rmnet_perf module, if present */
        rcu_read_lock();
        rmnet_perf_core_deaggregate = rcu_dereference(rmnet_perf_deag_entry);
        if (rmnet_perf_core_deaggregate) {
                rmnet_perf_core_deaggregate(skb, port);
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

no_perf:
        /* Deaggregation and freeing of HW originating
         * buffers is done within here
         */
        while (skb) {
                struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;

                skb_shinfo(skb)->frag_list = NULL;
                while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
                        __rmnet_map_ingress_handler(skbn, port);

                        if (skbn == skb)
                                goto next_skb;
                }

                consume_skb(skb);

next_skb:
                skb = skb_frag;
        }
}
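
/* Build the uplink MAP headers for an outgoing packet: reserve headroom,
 * apply MAPv4 or MAPv5 checksum offload, add a TSO header for GSO packets
 * (flushing any pending aggregation first; rmnet_map_send_agg_skb() is
 * presumably responsible for releasing agg_lock), prepend the MAP header
 * carrying the endpoint's mux_id, and optionally queue the packet for TX
 * aggregation, in which case -EINPROGRESS is returned and the caller must
 * not transmit it directly.
 */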
static int rmnet_map_egress_handler(struct sk_buff *skb,
                                    struct rmnet_port *port, u8 mux_id,
                                    struct net_device *orig_dev,
                                    bool low_latency)
{
        int required_headroom, additional_header_len, csum_type, tso = 0;
        struct rmnet_map_header *map_header;
        struct rmnet_aggregation_state *state;

        additional_header_len = 0;
        required_headroom = sizeof(struct rmnet_map_header);
        csum_type = 0;

        if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
                additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
                csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
        } else if ((port->data_format & RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5) ||
                   (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)) {
                additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
                csum_type = RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5;
        }

        required_headroom += additional_header_len;

        if (skb_headroom(skb) < required_headroom) {
                if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
                        return -ENOMEM;
        }

        if (port->data_format & RMNET_INGRESS_FORMAT_PS)
                qmi_rmnet_work_maybe_restart(port, NULL, NULL);

        state = &port->agg_state[(low_latency) ? RMNET_LL_AGG_STATE :
                                                 RMNET_DEFAULT_AGG_STATE];

        if (csum_type &&
            (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_L4 | SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) &&
            skb_shinfo(skb)->gso_size) {
                spin_lock_bh(&state->agg_lock);
                rmnet_map_send_agg_skb(state);

                if (rmnet_map_add_tso_header(skb, port, orig_dev))
                        return -EINVAL;
                csum_type = 0;
                tso = 1;
        }

        if (csum_type)
                rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
                                                 csum_type);

        map_header = rmnet_map_add_map_header(skb, additional_header_len, 0,
                                              port);
        if (!map_header)
                return -ENOMEM;

        map_header->mux_id = mux_id;

        if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
                if (state->params.agg_count < 2 ||
                    rmnet_map_tx_agg_skip(skb, required_headroom) || tso)
                        goto done;

                rmnet_map_tx_aggregate(skb, port, low_latency);
                return -EINPROGRESS;
        }

done:
        skb->protocol = htons(ETH_P_MAP);
        return 0;
}
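
/* Bridge mode: forward the frame unmodified to the paired bridge device. */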
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
        if (bridge_dev) {
                skb->dev = bridge_dev;
                dev_queue_xmit(skb);
        }
}

/* Ingress / Egress Entry Points */

/* Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 */
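/* In VND mode, a packet may first be steered to another CPU by the SHS
 * switch hook (once per skb, tracked via cb->qmap_steer) before entering the
 * MAP ingress path; bridge mode hands the frame to the bridge device. The
 * handler consumes the skb in all cases except loopback traffic, which is
 * passed back to the stack.
 */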
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct rmnet_port *port;
        struct net_device *dev;
        struct rmnet_skb_cb *cb;
        int (*rmnet_core_shs_switch)(struct sk_buff *skb,
                                     struct rmnet_shs_clnt_s *cfg);

        if (!skb)
                goto done;

        if (skb->pkt_type == PACKET_LOOPBACK)
                return RX_HANDLER_PASS;

        trace_rmnet_low(RMNET_MODULE, RMNET_RCV_FROM_PND, 0xDEF,
                        0xDEF, 0xDEF, 0xDEF, NULL, NULL);
        dev = skb->dev;
        port = rmnet_get_port(dev);
        if (unlikely(!port)) {
#if (KERNEL_VERSION(6, 0, 0) < LINUX_VERSION_CODE)
                dev_core_stats_rx_nohandler_inc(skb->dev);
#else
                atomic_long_inc(&skb->dev->rx_nohandler);
#endif
                kfree_skb(skb);
                goto done;
        }

        switch (port->rmnet_mode) {
        case RMNET_EPMODE_VND:
                rcu_read_lock();
                rmnet_core_shs_switch = rcu_dereference(rmnet_shs_switch);
                cb = RMNET_SKB_CB(skb);
                if (rmnet_core_shs_switch && !cb->qmap_steer &&
                    skb->priority != 0xda1a) {
                        cb->qmap_steer = 1;
                        rmnet_core_shs_switch(skb, &port->phy_shs_cfg);
                        rcu_read_unlock();
                        return RX_HANDLER_CONSUMED;
                }
                rcu_read_unlock();

                rmnet_map_ingress_handler(skb, port);
                break;
        case RMNET_EPMODE_BRIDGE:
                rmnet_bridge_handler(skb, port->bridge_ep);
                break;
        }

done:
        return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL(rmnet_rx_handler);
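
/* Secondary rx handler: the WLAN ingress hook may consume the packet;
 * otherwise the perf ingress hook gets a look and the frame is passed back
 * to the stack.
 */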
rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        rx_handler_result_t rc = RX_HANDLER_PASS;

        rmnet_module_hook_wlan_ingress_rx_handler(&rc, pskb);
        if (rc != RX_HANDLER_PASS)
                return rc;

        rmnet_module_hook_perf_ingress_rx_handler(skb);

        return RX_HANDLER_PASS;
}

/* Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
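/* A -EINPROGRESS return from rmnet_map_egress_handler() means the packet was
 * absorbed into a TX aggregation buffer; only TX byte accounting is done here
 * in that case. Low latency packets go out through rmnet_ll_send_skb(), which
 * owns the skb on failure, so only the drop counter is bumped.
 */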
void rmnet_egress_handler(struct sk_buff *skb, bool low_latency)
{
        struct net_device *orig_dev;
        struct rmnet_port *port;
        struct rmnet_priv *priv;
        u8 mux_id;
        int err;
        u32 skb_len;

        trace_rmnet_low(RMNET_MODULE, RMNET_TX_UL_PKT, 0xDEF, 0xDEF, 0xDEF,
                        0xDEF, (void *)skb, NULL);
        sk_pacing_shift_update(skb->sk, 8);

        orig_dev = skb->dev;
        priv = netdev_priv(orig_dev);
        skb->dev = priv->real_dev;
        mux_id = priv->mux_id;

        port = rmnet_get_port(skb->dev);
        if (!port)
                goto drop;

        skb_len = skb->len;
        err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev,
                                       low_latency);
        if (err == -ENOMEM || err == -EINVAL) {
                goto drop;
        } else if (err == -EINPROGRESS) {
                rmnet_vnd_tx_fixup(orig_dev, skb_len);
                return;
        }

        rmnet_vnd_tx_fixup(orig_dev, skb_len);

        if (low_latency) {
                if (rmnet_ll_send_skb(skb)) {
                        /* Drop but no need to free. Above API handles that */
                        this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
                        return;
                }
        } else {
                dev_queue_xmit(skb);
        }

        return;

drop:
        this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
        kfree_skb(skb);
}