core: Add transmit path locking

This is needed to avoid out-of-order transmits between the UL
aggregation workqueue context and direct transmits from application
context. The irqsave/irqrestore variant of the lock cannot be used,
as dev_queue_xmit() requires interrupts to be enabled.

CRs-Fixed: 3122333
Change-Id: Ie1020bf17e8c1d0e8fc14e999c21c3e007f4f183
Signed-off-by: Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com>
This commit is contained in:
Subash Abhinov Kasiviswanathan
2022-02-02 15:51:48 -08:00
committed by Gerrit - the friendly Code Review server
parent a2ea279ba9
commit d7c67e2ec4
3 changed files with 23 additions and 33 deletions
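For readers skimming the diffs: the core of the change is swapping the irqsave/irqrestore spinlock variant for the BH-disabling one, so the aggregated skb can be handed to dev_queue_xmit() while agg_lock is still held. The sketch below is an illustration only, not code from this change (the struct and function names are hypothetical): spin_lock_bh() disables softirqs but leaves hardware interrupts enabled, which dev_queue_xmit() requires, whereas spin_lock_irqsave() disables local interrupts and would force the lock to be dropped before transmitting, opening the out-of-order window the commit message describes.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical aggregation state, for illustration only. */
struct demo_agg_state {
	spinlock_t lock;		/* protects agg_skb */
	struct sk_buff *agg_skb;	/* pending aggregated frame */
};

static void demo_flush(struct demo_agg_state *state)
{
	struct sk_buff *skb;

	/* Softirqs are disabled but hardirqs stay enabled, so
	 * dev_queue_xmit() may be called with the lock held and the
	 * flush stays ordered against concurrent direct transmits.
	 */
	spin_lock_bh(&state->lock);
	skb = state->agg_skb;
	state->agg_skb = NULL;
	if (skb)
		dev_queue_xmit(skb);
	spin_unlock_bh(&state->lock);
}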

View file

@@ -371,10 +371,8 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 	if (csum_type &&
 	    (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_L4 | SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) &&
 	    skb_shinfo(skb)->gso_size) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&state->agg_lock, flags);
-		rmnet_map_send_agg_skb(state, flags);
+		spin_lock_bh(&state->agg_lock);
+		rmnet_map_send_agg_skb(state);
 
 		if (rmnet_map_add_tso_header(skb, port, orig_dev))
 			return -EINVAL;

View file

@@ -295,8 +295,7 @@ int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
 			       struct rmnet_map_dl_ind *dl_ind);
 void rmnet_map_cmd_exit(struct rmnet_port *port);
 void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush);
-void rmnet_map_send_agg_skb(struct rmnet_aggregation_state *state,
-			    unsigned long flags);
+void rmnet_map_send_agg_skb(struct rmnet_aggregation_state *state);
 int rmnet_map_add_tso_header(struct sk_buff *skb, struct rmnet_port *port,
 			     struct net_device *orig_dev);
 #endif /* _RMNET_MAP_H_ */

View file

@@ -1263,11 +1263,10 @@ static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
 {
 	struct sk_buff *skb = NULL;
 	struct rmnet_aggregation_state *state;
-	unsigned long flags;
 
 	state = container_of(work, struct rmnet_aggregation_state, agg_wq);
 
-	spin_lock_irqsave(&state->agg_lock, flags);
+	spin_lock_bh(&state->agg_lock);
 	if (likely(state->agg_state == -EINPROGRESS)) {
 		/* Buffer may have already been shipped out */
 		if (likely(state->agg_skb)) {
@@ -1279,9 +1278,9 @@ static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
 		state->agg_state = 0;
 	}
 
-	spin_unlock_irqrestore(&state->agg_lock, flags);
 	if (skb)
 		state->send_agg_skb(skb);
+	spin_unlock_bh(&state->agg_lock);
 }
 
 enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
@@ -1438,13 +1437,12 @@ rmnet_map_build_skb(struct rmnet_aggregation_state *state)
 	return skb;
 }
 
-void rmnet_map_send_agg_skb(struct rmnet_aggregation_state *state,
-			    unsigned long flags)
+void rmnet_map_send_agg_skb(struct rmnet_aggregation_state *state)
 {
 	struct sk_buff *agg_skb;
 
 	if (!state->agg_skb) {
-		spin_unlock_irqrestore(&state->agg_lock, flags);
+		spin_unlock_bh(&state->agg_lock);
 		return;
 	}
 
@@ -1454,9 +1452,9 @@ void rmnet_map_send_agg_skb(struct rmnet_aggregation_state *state,
 	state->agg_count = 0;
 	memset(&state->agg_time, 0, sizeof(state->agg_time));
 	state->agg_state = 0;
-	spin_unlock_irqrestore(&state->agg_lock, flags);
-	hrtimer_cancel(&state->hrtimer);
 	state->send_agg_skb(agg_skb);
+	spin_unlock_bh(&state->agg_lock);
+	hrtimer_cancel(&state->hrtimer);
 }
 
 void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
@@ -1465,20 +1463,19 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
 	struct rmnet_aggregation_state *state;
 	struct timespec64 diff, last;
 	int size;
-	unsigned long flags;
 
 	state = &port->agg_state[(low_latency) ? RMNET_LL_AGG_STATE :
 				 RMNET_DEFAULT_AGG_STATE];
 
new_packet:
-	spin_lock_irqsave(&state->agg_lock, flags);
+	spin_lock_bh(&state->agg_lock);
 	memcpy(&last, &state->agg_last, sizeof(last));
 	ktime_get_real_ts64(&state->agg_last);
 
 	if ((port->data_format & RMNET_EGRESS_FORMAT_PRIORITY) &&
 	    (RMNET_LLM(skb->priority) || RMNET_APS_LLB(skb->priority))) {
 		/* Send out any aggregated SKBs we have */
-		rmnet_map_send_agg_skb(state, flags);
+		rmnet_map_send_agg_skb(state);
 		/* Send out the priority SKB. Not holding agg_lock anymore */
 		skb->protocol = htons(ETH_P_MAP);
 		state->send_agg_skb(skb);
@@ -1494,9 +1491,9 @@ new_packet:
 	if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time ||
 	    size <= 0) {
-		spin_unlock_irqrestore(&state->agg_lock, flags);
 		skb->protocol = htons(ETH_P_MAP);
 		state->send_agg_skb(skb);
+		spin_unlock_bh(&state->agg_lock);
 		return;
 	}
 
@@ -1505,9 +1502,9 @@ new_packet:
 		state->agg_skb = NULL;
 		state->agg_count = 0;
 		memset(&state->agg_time, 0, sizeof(state->agg_time));
-		spin_unlock_irqrestore(&state->agg_lock, flags);
 		skb->protocol = htons(ETH_P_MAP);
 		state->send_agg_skb(skb);
+		spin_unlock_bh(&state->agg_lock);
 		return;
 	}
 
@@ -1525,7 +1522,7 @@ new_packet:
 	if (skb->len > size ||
 	    state->agg_count >= state->params.agg_count ||
 	    diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) {
-		rmnet_map_send_agg_skb(state, flags);
+		rmnet_map_send_agg_skb(state);
 		goto new_packet;
 	}
 
@@ -1540,15 +1537,13 @@ schedule:
 			      ns_to_ktime(state->params.agg_time),
 			      HRTIMER_MODE_REL);
 	}
-	spin_unlock_irqrestore(&state->agg_lock, flags);
+	spin_unlock_bh(&state->agg_lock);
 }
 
 void rmnet_map_update_ul_agg_config(struct rmnet_aggregation_state *state,
 				    u16 size, u8 count, u8 features, u32 time)
 {
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&state->agg_lock, irq_flags);
+	spin_lock_bh(&state->agg_lock);
 	state->params.agg_count = count;
 	state->params.agg_time = time;
 	state->params.agg_size = size;
@@ -1572,7 +1567,7 @@ void rmnet_map_update_ul_agg_config(struct rmnet_aggregation_state *state,
 		rmnet_alloc_agg_pages(state);
 
done:
-	spin_unlock_irqrestore(&state->agg_lock, irq_flags);
+	spin_unlock_bh(&state->agg_lock);
 }
 
 void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
@@ -1607,7 +1602,6 @@ void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
 
 void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
 {
-	unsigned long flags;
 	unsigned int i;
 
 	for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
@@ -1620,7 +1614,7 @@ void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
 	for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
 		struct rmnet_aggregation_state *state = &port->agg_state[i];
 
-		spin_lock_irqsave(&state->agg_lock, flags);
+		spin_lock_bh(&state->agg_lock);
 		if (state->agg_state == -EINPROGRESS) {
 			if (state->agg_skb) {
 				kfree_skb(state->agg_skb);
@@ -1634,7 +1628,7 @@ void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
 		}
 
 		rmnet_free_agg_pages(state);
-		spin_unlock_irqrestore(&state->agg_lock, flags);
+		spin_unlock_bh(&state->agg_lock);
 	}
 }
 
@@ -1643,7 +1637,6 @@ void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush)
 	struct rmnet_aggregation_state *state;
 	struct rmnet_port *port;
 	struct sk_buff *agg_skb;
-	unsigned long flags;
 
 	if (unlikely(ch >= RMNET_MAX_AGG_STATE))
 		ch = RMNET_DEFAULT_AGG_STATE;
@@ -1661,18 +1654,18 @@ void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush)
 	if (!(port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION))
 		goto send;
 
-	spin_lock_irqsave(&state->agg_lock, flags);
+	spin_lock_bh(&state->agg_lock);
 	if (state->agg_skb) {
 		agg_skb = state->agg_skb;
 		state->agg_skb = NULL;
 		state->agg_count = 0;
 		memset(&state->agg_time, 0, sizeof(state->agg_time));
 		state->agg_state = 0;
-		spin_unlock_irqrestore(&state->agg_lock, flags);
-		hrtimer_cancel(&state->hrtimer);
 		state->send_agg_skb(agg_skb);
+		spin_unlock_bh(&state->agg_lock);
+		hrtimer_cancel(&state->hrtimer);
 	} else {
-		spin_unlock_irqrestore(&state->agg_lock, flags);
+		spin_unlock_bh(&state->agg_lock);
 	}
 
send: