- /* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * RMNET Data ingress/egress handler
- *
- */
- #include <linux/netdevice.h>
- #include <linux/netdev_features.h>
- #include <linux/if_arp.h>
- #include <linux/ip.h>
- #include <linux/ipv6.h>
- #include <linux/inet.h>
- #include <net/sock.h>
- #include <linux/tracepoint.h>
- #include <linux/version.h>
- #include "rmnet_private.h"
- #include "rmnet_config.h"
- #include "rmnet_vnd.h"
- #include "rmnet_map.h"
- #include "rmnet_handlers.h"
- #include "rmnet_descriptor.h"
- #include "rmnet_ll.h"
- #include "rmnet_module.h"
- #include "rmnet_qmi.h"
- #include "qmi_rmnet.h"
- #define RMNET_IP_VERSION_4 0x40
- #define RMNET_IP_VERSION_6 0x60
- #define CREATE_TRACE_POINTS
- #include "rmnet_trace.h"
- EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_low);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_high);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_err);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_low);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_high);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_err);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_low);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_high);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_err);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_low);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_high);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_err);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_update);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_reset);
- EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_boost);
- EXPORT_TRACEPOINT_SYMBOL(print_icmp_rx);
- /* Helper Functions */
- void rmnet_set_skb_proto(struct sk_buff *skb)
- {
- switch (rmnet_map_data_ptr(skb)[0] & 0xF0) {
- case RMNET_IP_VERSION_4:
- skb->protocol = htons(ETH_P_IP);
- break;
- case RMNET_IP_VERSION_6:
- skb->protocol = htons(ETH_P_IPV6);
- break;
- default:
- skb->protocol = htons(ETH_P_MAP);
- break;
- }
- }
- EXPORT_SYMBOL(rmnet_set_skb_proto);
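- /* Example: a MAP payload whose first byte is 0x45 (IPv4, IHL = 5) is
-  * marked ETH_P_IP, one starting with 0x60 is marked ETH_P_IPV6, and any
-  * other leading nibble leaves the frame as ETH_P_MAP.
-  */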
- bool (*rmnet_shs_slow_start_detect)(u32 hash_key) __rcu __read_mostly;
- EXPORT_SYMBOL(rmnet_shs_slow_start_detect);
- bool rmnet_slow_start_on(u32 hash_key)
- {
- bool (*rmnet_shs_slow_start_on)(u32 hash_key);
- rmnet_shs_slow_start_on = rcu_dereference(rmnet_shs_slow_start_detect);
- if (rmnet_shs_slow_start_on)
- return rmnet_shs_slow_start_on(hash_key);
- return false;
- }
- EXPORT_SYMBOL(rmnet_slow_start_on);
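- /* Sketch of how an external module such as rmnet_shs is expected to
-  * publish and retire this hook; the callback name below is hypothetical:
-  *
-  *     static bool shs_detect(u32 hash_key) { ... }
-  *
-  *     rcu_assign_pointer(rmnet_shs_slow_start_detect, shs_detect);
-  *     ...
-  *     rcu_assign_pointer(rmnet_shs_slow_start_detect, NULL);
-  *     synchronize_rcu();
-  */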
- /* SHS hook handlers */
- int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
- struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
- EXPORT_SYMBOL(rmnet_shs_skb_entry);
- int (*rmnet_shs_switch)(struct sk_buff *skb,
- struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
- EXPORT_SYMBOL(rmnet_shs_switch);
- /* SHS hook handler for the work queue */
- int (*rmnet_shs_skb_entry_wq)(struct sk_buff *skb,
- struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
- EXPORT_SYMBOL(rmnet_shs_skb_entry_wq);
- /* Generic handler */
- void
- rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
- {
- int (*rmnet_shs_stamp)(struct sk_buff *skb,
- struct rmnet_shs_clnt_s *cfg);
- trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
- 0xDEF, 0xDEF, (void *)skb, NULL);
- skb_reset_network_header(skb);
- rmnet_vnd_rx_fixup(skb->dev, skb->len);
- skb->pkt_type = PACKET_HOST;
- skb_set_mac_header(skb, 0);
- /* Low latency packets use a different balancing scheme */
- if (skb->priority == 0xda1a)
- goto skip_shs;
- rcu_read_lock();
- rmnet_shs_stamp = rcu_dereference(rmnet_shs_skb_entry);
- if (rmnet_shs_stamp) {
- rmnet_shs_stamp(skb, &port->shs_cfg);
- rcu_read_unlock();
- return;
- }
- rcu_read_unlock();
- skip_shs:
- if (rmnet_module_hook_shs_skb_ll_entry(NULL, skb, &port->shs_cfg))
- return;
- netif_receive_skb(skb);
- }
- EXPORT_SYMBOL(rmnet_deliver_skb);
- /* Note: the port pointer cannot be used here if it has gone stale */
- void
- rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
- enum rmnet_packet_context ctx)
- {
- int (*rmnet_shs_stamp)(struct sk_buff *skb,
- struct rmnet_shs_clnt_s *cfg);
- struct rmnet_priv *priv = netdev_priv(skb->dev);
- trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
- 0xDEF, 0xDEF, (void *)skb, NULL);
- skb_reset_transport_header(skb);
- skb_reset_network_header(skb);
- rmnet_vnd_rx_fixup(skb->dev, skb->len);
- skb->pkt_type = PACKET_HOST;
- skb_set_mac_header(skb, 0);
- /* Packets coming from the work queue context due to the packet flush
-  * timer must go through the special workqueue path in the SHS driver
-  */
- rcu_read_lock();
- rmnet_shs_stamp = (!ctx) ? rcu_dereference(rmnet_shs_skb_entry) :
- rcu_dereference(rmnet_shs_skb_entry_wq);
- if (rmnet_shs_stamp) {
- rmnet_shs_stamp(skb, &port->shs_cfg);
- rcu_read_unlock();
- return;
- }
- rcu_read_unlock();
- if (ctx == RMNET_NET_RX_CTX)
- netif_receive_skb(skb);
- else
- gro_cells_receive(&priv->gro_cells, skb);
- }
- EXPORT_SYMBOL(rmnet_deliver_skb_wq);
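- /* Note: the (!ctx) test above implies RMNET_NET_RX_CTX is the zero value
-  * of enum rmnet_packet_context: timer-flushed workqueue packets take the
-  * rmnet_shs_skb_entry_wq hook and, absent the SHS module, are handed to
-  * gro_cells_receive() rather than netif_receive_skb().
-  */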
- /* Deliver a list of skbs after undoing coalescing */
- static void rmnet_deliver_skb_list(struct sk_buff_head *head,
- struct rmnet_port *port)
- {
- struct sk_buff *skb;
- while ((skb = __skb_dequeue(head))) {
- rmnet_set_skb_proto(skb);
- rmnet_deliver_skb(skb, port);
- }
- }
- /* MAP handler */
- static void
- __rmnet_map_ingress_handler(struct sk_buff *skb,
- struct rmnet_port *port)
- {
- struct rmnet_map_header *qmap;
- struct rmnet_endpoint *ep;
- struct sk_buff_head list;
- u16 len, pad;
- u8 mux_id;
- /* We don't need the spinlock since only we touch this */
- __skb_queue_head_init(&list);
- qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
- if (qmap->cd_bit) {
- qmi_rmnet_set_dl_msg_active(port);
- if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
- if (!rmnet_map_flow_command(skb, port, false))
- return;
- }
- if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
- return rmnet_map_command(skb, port);
- goto free_skb;
- }
- mux_id = qmap->mux_id;
- pad = qmap->pad_len;
- len = ntohs(qmap->pkt_len) - pad;
- if (mux_id >= RMNET_MAX_LOGICAL_EP)
- goto free_skb;
- ep = rmnet_get_endpoint(port, mux_id);
- if (!ep)
- goto free_skb;
- skb->dev = ep->egress_dev;
- /* Handle QMAPv5 packet */
- if (qmap->next_hdr &&
- (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
- RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5))) {
- if (rmnet_map_process_next_hdr_packet(skb, &list, len))
- goto free_skb;
- } else {
- /* We only have the main QMAP header to worry about */
- pskb_pull(skb, sizeof(*qmap));
- rmnet_set_skb_proto(skb);
- if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
- if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
- pskb_trim(skb, len);
- /* Push the single packet onto the list */
- __skb_queue_tail(&list, skb);
- }
- if (port->data_format & RMNET_INGRESS_FORMAT_PS)
- qmi_rmnet_work_maybe_restart(port, NULL, skb_peek(&list));
- rmnet_deliver_skb_list(&list, port);
- return;
- free_skb:
- kfree_skb(skb);
- }
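- /* Worked example (illustrative numbers): a downlink MAP frame carrying a
-  * 1398 byte IP packet plus 2 trailing pad bytes arrives with
-  * pkt_len = htons(1400) and pad_len = 2. The handler recovers
-  * len = 1400 - 2 = 1398, verifies the checksum over len + pad bytes, and
-  * pskb_trim()s the skb to 1398 before delivery.
-  */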
- int (*rmnet_perf_deag_entry)(struct sk_buff *skb,
- struct rmnet_port *port) __rcu __read_mostly;
- EXPORT_SYMBOL(rmnet_perf_deag_entry);
- static void
- rmnet_map_ingress_handler(struct sk_buff *skb,
- struct rmnet_port *port)
- {
- struct sk_buff *skbn;
- int (*rmnet_perf_core_deaggregate)(struct sk_buff *skb,
- struct rmnet_port *port);
- if (skb->dev->type == ARPHRD_ETHER) {
- if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
- kfree_skb(skb);
- return;
- }
- skb_push(skb, ETH_HLEN);
- }
- if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
- RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5)) {
- if (skb_is_nonlinear(skb)) {
- rmnet_frag_ingress_handler(skb, port);
- return;
- }
- }
- /* No aggregation. Pass the frame on as is */
- if (!(port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION)) {
- __rmnet_map_ingress_handler(skb, port);
- return;
- }
- if (skb->priority == 0xda1a)
- goto no_perf;
- /* Pass off handling to rmnet_perf module, if present */
- rcu_read_lock();
- rmnet_perf_core_deaggregate = rcu_dereference(rmnet_perf_deag_entry);
- if (rmnet_perf_core_deaggregate) {
- rmnet_perf_core_deaggregate(skb, port);
- rcu_read_unlock();
- return;
- }
- rcu_read_unlock();
- no_perf:
- /* Deaggregation and freeing of HW-originated buffers is done here.
-  * Each skb in the frag_list chain may carry several aggregated MAP
-  * frames; rmnet_map_deaggregate() peels them off one at a time.
-  */
- while (skb) {
- struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
- skb_shinfo(skb)->frag_list = NULL;
- while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
- __rmnet_map_ingress_handler(skbn, port);
- /* The last MAP frame can be returned as the original skb itself;
-  * ownership then passes to the ingress handler, so the skb must
-  * not be consumed again below.
-  */
- if (skbn == skb)
- goto next_skb;
- }
- consume_skb(skb);
- next_skb:
- skb = skb_frag;
- }
- }
- static int rmnet_map_egress_handler(struct sk_buff *skb,
- struct rmnet_port *port, u8 mux_id,
- struct net_device *orig_dev,
- bool low_latency)
- {
- int required_headroom, additional_header_len, csum_type, tso = 0;
- struct rmnet_map_header *map_header;
- struct rmnet_aggregation_state *state;
- additional_header_len = 0;
- required_headroom = sizeof(struct rmnet_map_header);
- csum_type = 0;
- if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
- additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
- csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
- } else if ((port->data_format & RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5) ||
- (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)) {
- additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
- csum_type = RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5;
- }
- required_headroom += additional_header_len;
- if (skb_headroom(skb) < required_headroom) {
- if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
- return -ENOMEM;
- }
- if (port->data_format & RMNET_INGRESS_FORMAT_PS)
- qmi_rmnet_work_maybe_restart(port, NULL, NULL);
- state = &port->agg_state[(low_latency) ? RMNET_LL_AGG_STATE :
- RMNET_DEFAULT_AGG_STATE];
- if (csum_type &&
- (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_L4 | SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) &&
- skb_shinfo(skb)->gso_size) {
- spin_lock_bh(&state->agg_lock);
- /* Flushes any pending aggregate; also releases agg_lock internally,
-  * which is why no explicit unlock follows on either path below.
-  */
- rmnet_map_send_agg_skb(state);
- if (rmnet_map_add_tso_header(skb, port, orig_dev))
- return -EINVAL;
- csum_type = 0;
- tso = 1;
- }
- if (csum_type)
- rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
- csum_type);
- map_header = rmnet_map_add_map_header(skb, additional_header_len, 0,
- port);
- if (!map_header)
- return -ENOMEM;
- map_header->mux_id = mux_id;
- if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
- if (state->params.agg_count < 2 ||
- rmnet_map_tx_agg_skip(skb, required_headroom) || tso)
- goto done;
- rmnet_map_tx_aggregate(skb, port, low_latency);
- return -EINPROGRESS;
- }
- done:
- skb->protocol = htons(ETH_P_MAP);
- return 0;
- }
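- /* Resulting uplink frame (illustrative), with QMAPv5 checksum offload
-  * enabled and the aggregation path not taken:
-  *
-  *     +------------------+--------------------------+-----------+
-  *     | rmnet_map_header | rmnet_map_v5_csum_header | IP packet |
-  *     +------------------+--------------------------+-----------+
-  *
-  * map_header->mux_id selects the logical endpoint on the modem side.
-  */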
- static void
- rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
- {
- if (bridge_dev) {
- skb->dev = bridge_dev;
- dev_queue_xmit(skb);
- }
- }
- /* Ingress / Egress Entry Points */
- /* Processes packet as per ingress data format for receiving device. Logical
- * endpoint is determined from packet inspection. Packet is then sent to the
- * egress device listed in the logical endpoint configuration.
- */
- rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
- {
- struct sk_buff *skb = *pskb;
- struct rmnet_port *port;
- struct net_device *dev;
- struct rmnet_skb_cb *cb;
- int (*rmnet_core_shs_switch)(struct sk_buff *skb,
- struct rmnet_shs_clnt_s *cfg);
- if (!skb)
- goto done;
- if (skb->pkt_type == PACKET_LOOPBACK)
- return RX_HANDLER_PASS;
- trace_rmnet_low(RMNET_MODULE, RMNET_RCV_FROM_PND, 0xDEF,
- 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
- dev = skb->dev;
- port = rmnet_get_port(dev);
- if (unlikely(!port)) {
- #if (KERNEL_VERSION(6, 0, 0) < LINUX_VERSION_CODE)
- dev_core_stats_rx_nohandler_inc(skb->dev);
- #else
- atomic_long_inc(&skb->dev->rx_nohandler);
- #endif
- kfree_skb(skb);
- goto done;
- }
- switch (port->rmnet_mode) {
- case RMNET_EPMODE_VND:
- rcu_read_lock();
- rmnet_core_shs_switch = rcu_dereference(rmnet_shs_switch);
- cb = RMNET_SKB_CB(skb);
- if (rmnet_core_shs_switch && !cb->qmap_steer &&
- skb->priority != 0xda1a) {
- cb->qmap_steer = 1;
- rmnet_core_shs_switch(skb, &port->phy_shs_cfg);
- rcu_read_unlock();
- return RX_HANDLER_CONSUMED;
- }
- rcu_read_unlock();
- rmnet_map_ingress_handler(skb, port);
- break;
- case RMNET_EPMODE_BRIDGE:
- rmnet_bridge_handler(skb, port->bridge_ep);
- break;
- }
- done:
- return RX_HANDLER_CONSUMED;
- }
- EXPORT_SYMBOL(rmnet_rx_handler);
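- /* Sketch of how this handler is attached; the rmnet config layer claims
-  * the physical device with the standard rx_handler API (simplified):
-  *
-  *     rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
-  *     if (rc)
-  *             return rc;
-  */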
- rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb)
- {
- struct sk_buff *skb = *pskb;
- rx_handler_result_t rc = RX_HANDLER_PASS;
- rmnet_module_hook_wlan_ingress_rx_handler(&rc, pskb);
- if (rc != RX_HANDLER_PASS)
- return rc;
- rmnet_module_hook_perf_ingress_rx_handler(skb);
- return RX_HANDLER_PASS;
- }
- /* Modifies packet as per logical endpoint configuration and egress data format
- * for egress device configured in logical endpoint. Packet is then transmitted
- * on the egress device.
- */
- void rmnet_egress_handler(struct sk_buff *skb, bool low_latency)
- {
- struct net_device *orig_dev;
- struct rmnet_port *port;
- struct rmnet_priv *priv;
- u8 mux_id;
- int err;
- u32 skb_len;
- trace_rmnet_low(RMNET_MODULE, RMNET_TX_UL_PKT, 0xDEF, 0xDEF, 0xDEF,
- 0xDEF, (void *)skb, NULL);
- sk_pacing_shift_update(skb->sk, 8);
- orig_dev = skb->dev;
- priv = netdev_priv(orig_dev);
- skb->dev = priv->real_dev;
- mux_id = priv->mux_id;
- port = rmnet_get_port(skb->dev);
- if (!port)
- goto drop;
- skb_len = skb->len;
- err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev,
- low_latency);
- if (err == -ENOMEM || err == -EINVAL) {
- goto drop;
- } else if (err == -EINPROGRESS) {
- rmnet_vnd_tx_fixup(orig_dev, skb_len);
- return;
- }
- rmnet_vnd_tx_fixup(orig_dev, skb_len);
- if (low_latency) {
- if (rmnet_ll_send_skb(skb)) {
- /* Dropped, but no need to free; rmnet_ll_send_skb() handles that */
- this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
- return;
- }
- } else {
- dev_queue_xmit(skb);
- }
- return;
- drop:
- this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
- kfree_skb(skb);
- }
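- /* Simplified sketch of the call site in the VND transmit path; the real
-  * rmnet_vnd code also selects the low latency channel from packet
-  * markings:
-  *
-  *     static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
-  *                                             struct net_device *dev)
-  *     {
-  *             rmnet_egress_handler(skb, false);
-  *             return NETDEV_TX_OK;
-  *     }
-  */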