rmnet_core: Add support for TSO

This adds support for the new transmit offload header as well as
the handling needed for it in the core driver.

CRs-Fixed: 2810638
Change-Id: I8ce2e0772209faf3d585e7d9d8d56eceb695d586
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
This commit is contained in:
Subash Abhinov Kasiviswanathan
2020-12-16 23:50:03 -07:00
parent 4c8ad36ec7
commit dad91a4eff
5 changed files with 75 additions and 4 deletions

View File

@@ -175,6 +175,8 @@ struct rmnet_priv_stats {
u64 csum_hw;
struct rmnet_coal_stats coal;
u64 ul_prio;
u64 tso_pkts;
u64 tso_arriv_errs;
};
struct rmnet_priv {

View File

@@ -325,7 +325,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
struct rmnet_port *port, u8 mux_id,
struct net_device *orig_dev)
{
int required_headroom, additional_header_len, csum_type;
int required_headroom, additional_header_len, csum_type, tso = 0;
struct rmnet_map_header *map_header;
additional_header_len = 0;
@@ -351,6 +351,20 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
if (port->data_format & RMNET_INGRESS_FORMAT_PS)
qmi_rmnet_work_maybe_restart(port);
if (csum_type &&
(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_L4 | SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) &&
skb_shinfo(skb)->gso_size) {
unsigned long flags;
spin_lock_irqsave(&port->agg_lock, flags);
rmnet_map_send_agg_skb(port, flags);
if (rmnet_map_add_tso_header(skb, port, orig_dev))
return -EINVAL;
csum_type = 0;
tso = 1;
}
if (csum_type)
rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
csum_type);
@@ -363,7 +377,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
map_header->mux_id = mux_id;
if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
if (rmnet_map_tx_agg_skip(skb, required_headroom))
if (rmnet_map_tx_agg_skip(skb, required_headroom) || tso)
goto done;
rmnet_map_tx_aggregate(skb, port);
@@ -467,7 +481,7 @@ void rmnet_egress_handler(struct sk_buff *skb)
skb_len = skb->len;
err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
if (err == -ENOMEM) {
if (err == -ENOMEM || err == -EINVAL) {
goto drop;
} else if (err == -EINPROGRESS) {
rmnet_vnd_tx_fixup(orig_dev, skb_len);

View File

@@ -48,6 +48,7 @@ enum rmnet_map_v5_header_type {
RMNET_MAP_HEADER_TYPE_UNKNOWN,
RMNET_MAP_HEADER_TYPE_COALESCING = 0x1,
RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
RMNET_MAP_HEADER_TYPE_TSO = 0x3,
RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
};
@@ -111,6 +112,16 @@ struct rmnet_map_v5_coal_header {
struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS];
} __aligned(1);
/* QMAP v5 uplink TSO header, pushed onto an egress skb in place of the
 * checksum-offload header when the skb still carries GSO state, so the
 * device performs segmentation (and checksumming) in hardware.
 * NOTE(review): bitfield order is assumed to match the device's byte
 * layout for RMNET_MAP_HEADER_TYPE_TSO -- confirm against the QMAP v5
 * interface spec; endianness of bitfields is compiler-dependent. */
struct rmnet_map_v5_tso_header {
u8 next_hdr:1;		/* presumably: another v5 header follows -- verify */
u8 header_type:7;	/* RMNET_MAP_HEADER_TYPE_TSO */
u8 hw_reserved:5;
u8 priority:1;		/* set via rmnet_map_v5_check_priority() */
u8 zero_csum:1;		/* never set by rmnet_map_add_tso_header() as written */
u8 ip_id_cfg:1;		/* 1 = keep IP ID fixed across segments (SKB_GSO_TCP_FIXEDID) */
__be16 segment_size;	/* MSS (gso_size), network byte order */
} __aligned(1);
/* QMAP v4 headers */
struct rmnet_map_dl_csum_trailer {
u8 reserved1;
@@ -280,4 +291,7 @@ int rmnet_map_dl_ind_register(struct rmnet_port *port,
int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
struct rmnet_map_dl_ind *dl_ind);
void rmnet_map_cmd_exit(struct rmnet_port *port);
void rmnet_map_send_agg_skb(struct rmnet_port *port, unsigned long flags);
int rmnet_map_add_tso_header(struct sk_buff *skb, struct rmnet_port *port,
struct net_device *orig_dev);
#endif /* _RMNET_MAP_H_ */

View File

@@ -1423,7 +1423,7 @@ static struct sk_buff *rmnet_map_build_skb(struct rmnet_port *port)
return skb;
}
static void rmnet_map_send_agg_skb(struct rmnet_port *port, unsigned long flags)
void rmnet_map_send_agg_skb(struct rmnet_port *port, unsigned long flags)
{
struct sk_buff *agg_skb;
@@ -1621,3 +1621,38 @@ void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb)
dev_queue_xmit(qmap_skb);
}
EXPORT_SYMBOL(rmnet_map_tx_qmap_cmd);
/* Push a QMAP v5 TSO header onto an egress GSO skb so segmentation is
 * done by the device instead of the stack.
 *
 * @skb:      uplink packet still carrying GSO metadata
 * @port:     rmnet port (consulted for RMNET_EGRESS_FORMAT_PRIORITY)
 * @orig_dev: originating rmnet vnd; must advertise TSO/UDP-GSO features
 *
 * Returns 0 on success; -EINVAL (and increments tso_arriv_errs) if a GSO
 * skb arrived on a device without the matching NETIF_F features.
 *
 * Side effects on success: clears the skb's gso_{size,segs,type}, sets
 * ip_summed to CHECKSUM_NONE (hardware handles the checksum together
 * with segmentation), and increments the tso_pkts counter.
 *
 * NOTE(review): caller is assumed to have reserved enough headroom for
 * the pushed header -- confirm against rmnet_map_egress_handler's
 * required_headroom accounting.
 */
int rmnet_map_add_tso_header(struct sk_buff *skb, struct rmnet_port *port,
struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_v5_tso_header *ul_header;
/* GSO skb on a device that never advertised TSO/UDP-L4 offload is a
 * stack/driver inconsistency; count it and let the caller drop. */
if (!(orig_dev->features & (NETIF_F_ALL_TSO | NETIF_F_GSO_UDP_L4))) {
priv->stats.tso_arriv_errs++;
return -EINVAL;
}
ul_header = (struct rmnet_map_v5_tso_header *)
skb_push(skb, sizeof(*ul_header));
memset(ul_header, 0, sizeof(*ul_header));
ul_header->header_type = RMNET_MAP_HEADER_TYPE_TSO;
/* Priority bit shares its position with the csum header's, so the
 * common helper is reused via a cast. */
if (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)
rmnet_map_v5_check_priority(skb, orig_dev,
(struct rmnet_map_v5_csum_header *)ul_header);
ul_header->segment_size = htons(skb_shinfo(skb)->gso_size);
/* Tell hardware to keep the IP ID constant across segments when the
 * stack requested a fixed ID. */
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
ul_header->ip_id_cfg = 1;
/* Offload is now owned by the device: drop software csum/GSO state so
 * later stages don't try to process this skb as GSO again. */
skb->ip_summed = CHECKSUM_NONE;
skb_shinfo(skb)->gso_size = 0;
skb_shinfo(skb)->gso_segs = 0;
skb_shinfo(skb)->gso_type = 0;
priv->stats.tso_pkts++;
return 0;
}

View File

@@ -257,6 +257,8 @@ static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
"Coalescing UDP frames",
"Coalescing UDP bytes",
"Uplink priority packets",
"TSO packets",
"TSO packets arriving incorrectly",
};
static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -390,9 +392,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
rmnet_dev->hw_features |= NETIF_F_SG;
rmnet_dev->hw_features |= NETIF_F_GRO_HW;
rmnet_dev->hw_features |= NETIF_F_GSO_UDP_L4;
rmnet_dev->hw_features |= NETIF_F_ALL_TSO;
priv->real_dev = real_dev;
rmnet_dev->gso_max_size = 64000;
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;