net: qualcomm: rmnet: flush uplink aggregation on priority tx

If the RmNet driver needs to transmit a packet marked as needing
prioritization, it should skip uplink aggregation for that packet to
avoid adding extra latency. Additionally, any packets already held for
aggregation should be flushed before the prioritized packet is sent to
avoid reordering.

Change-Id: Ia3063ec3f0553693a0f66c8eb5bbb88d250f7523
Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
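
For context, the fragment below restates the new priority path in
isolation. It is an illustrative sketch only, not part of the patch: the
helper names rmnet_tx_is_priority() and rmnet_tx_send_priority() are
hypothetical, it assumes the driver-internal struct rmnet_port and
RMNET_EGRESS_FORMAT_PRIORITY data format flag, and it calls the
rmnet_map_send_agg_skb() helper introduced in the diff below as if it
lived alongside the patched rmnet_map_tx_aggregate().

/* Illustrative sketch only -- not part of this patch. Helper names are
 * hypothetical; struct rmnet_port, RMNET_EGRESS_FORMAT_PRIORITY and
 * rmnet_map_send_agg_skb() come from the driver and the diff below.
 * agg_lock acquisition and hrtimer details are elided for brevity.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include "rmnet_config.h"        /* driver-internal struct rmnet_port */

static bool rmnet_tx_is_priority(struct rmnet_port *port, struct sk_buff *skb)
{
        /* Prioritization applies only when the egress data format enables
         * it and the stack has marked this skb with a non-zero priority.
         */
        return (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY) &&
               skb->priority;
}

static void rmnet_tx_send_priority(struct rmnet_port *port,
                                   struct sk_buff *skb,
                                   unsigned long flags)
{
        /* Flush anything already aggregated so packet ordering is kept;
         * rmnet_map_send_agg_skb() also drops agg_lock.
         */
        rmnet_map_send_agg_skb(port, flags);

        /* Send the priority skb on its own, bypassing aggregation. */
        skb->protocol = htons(ETH_P_MAP);
        dev_queue_xmit(skb);
}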
@@ -1409,11 +1409,30 @@ static struct sk_buff *rmnet_map_build_skb(struct rmnet_port *port)
         return skb;
 }
 
+static void rmnet_map_send_agg_skb(struct rmnet_port *port, unsigned long flags)
+{
+        struct sk_buff *agg_skb;
+
+        if (!port->agg_skb) {
+                spin_unlock_irqrestore(&port->agg_lock, flags);
+                return;
+        }
+
+        agg_skb = port->agg_skb;
+        /* Reset the aggregation state */
+        port->agg_skb = NULL;
+        port->agg_count = 0;
+        memset(&port->agg_time, 0, sizeof(port->agg_time));
+        port->agg_state = 0;
+        spin_unlock_irqrestore(&port->agg_lock, flags);
+        hrtimer_cancel(&port->hrtimer);
+        dev_queue_xmit(agg_skb);
+}
+
 void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
 {
         struct timespec64 diff, last;
-        int size, agg_count = 0;
-        struct sk_buff *agg_skb;
+        int size;
         unsigned long flags;
 
 new_packet:
@@ -1421,6 +1440,16 @@ new_packet:
         memcpy(&last, &port->agg_last, sizeof(struct timespec));
         ktime_get_real_ts64(&port->agg_last);
 
+        if ((port->data_format & RMNET_EGRESS_FORMAT_PRIORITY) &&
+            skb->priority) {
+                /* Send out any aggregated SKBs we have */
+                rmnet_map_send_agg_skb(port, flags);
+                /* Send out the priority SKB. Not holding agg_lock anymore */
+                skb->protocol = htons(ETH_P_MAP);
+                dev_queue_xmit(skb);
+                return;
+        }
+
         if (!port->agg_skb) {
                 /* Check to see if we should agg first. If the traffic is very
                  * sparse, don't aggregate. We will need to tune this later
@@ -1461,15 +1490,7 @@ new_packet:
         if (skb->len > size ||
             port->agg_count >= port->egress_agg_params.agg_count ||
             diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) {
-                agg_skb = port->agg_skb;
-                agg_count = port->agg_count;
-                port->agg_skb = 0;
-                port->agg_count = 0;
-                memset(&port->agg_time, 0, sizeof(struct timespec));
-                port->agg_state = 0;
-                spin_unlock_irqrestore(&port->agg_lock, flags);
-                hrtimer_cancel(&port->hrtimer);
-                dev_queue_xmit(agg_skb);
+                rmnet_map_send_agg_skb(port, flags);
                 goto new_packet;
         }
 