
rmnet_core: LL receive packet steering

Steer packets from LLC to different CPU cores for rmnet processing
by assigning a different RX queue. Also bypass rmnet_offload and
rmnet_shs for LL packets.
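
The checks added in this change key off skb->priority == 0xda1a, the marker the LL (low latency) channel puts on packets it receives. A minimal sketch of that marking, assuming a hypothetical helper (only the 0xda1a value is taken from the diffs below; the actual assignment happens in LL channel code not shown in these hunks):

	#include <linux/skbuff.h>

	/* Tag an LL-channel skb so downstream rmnet stages can recognize
	 * it and skip the perf/steering paths. rmnet_ll_mark_skb() is a
	 * hypothetical name; the 0xda1a value matches the checks below.
	 */
	static void rmnet_ll_mark_skb(struct sk_buff *skb)
	{
		skb->priority = 0xda1a;
	}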

Change-Id: I459dabe8dd02132614f0e2cf461c89274f18223c
Acked-by: Weiyi Chen <[email protected]>
Signed-off-by: Subash Abhinov Kasiviswanathan <[email protected]>
Subash Abhinov Kasiviswanathan, 4 years ago
parent
commit
af7b029c04
3 changed files with 16 additions and 1 deletion
  1. core/rmnet_descriptor.c (+9 -0)
  2. core/rmnet_handlers.c (+6 -1)
  3. core/rmnet_ll_ipa.c (+1 -0)

core/rmnet_descriptor.c (+9 -0)

@@ -1528,6 +1528,7 @@ __rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
 	LIST_HEAD(segs);
 	u16 len, pad;
 	u8 mux_id;
+	bool skip_perf = (frag_desc->priority == 0xda1a);
 
 	qmap = rmnet_frag_header_ptr(frag_desc, 0, sizeof(*qmap), &__qmap);
 	if (!qmap)
@@ -1580,6 +1581,9 @@ __rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
 	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
 		qmi_rmnet_work_maybe_restart(port);
 
+	if (skip_perf)
+		goto no_perf;
+
 	rcu_read_lock();
 	rmnet_perf_ingress = rcu_dereference(rmnet_perf_desc_entry);
 	if (rmnet_perf_ingress) {
@@ -1592,6 +1596,7 @@ __rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
 	}
 	rcu_read_unlock();
 
+no_perf:
 	list_for_each_entry_safe(frag, tmp, &segs, list) {
 		list_del_init(&frag->list);
 		rmnet_frag_deliver(frag, port);
@@ -1611,6 +1616,7 @@ void rmnet_frag_ingress_handler(struct sk_buff *skb,
 {
 	rmnet_perf_chain_hook_t rmnet_perf_opt_chain_end;
 	LIST_HEAD(desc_list);
+	bool skip_perf = (skb->priority == 0xda1a);
 
 	/* Deaggregation and freeing of HW originating
 	 * buffers is done within here
@@ -1635,6 +1641,9 @@ void rmnet_frag_ingress_handler(struct sk_buff *skb,
 		skb = skb_frag;
 	}
 
+	if (skip_perf)
+		return;
+
 	rcu_read_lock();
 	rmnet_perf_opt_chain_end = rcu_dereference(rmnet_perf_chain_end);
 	if (rmnet_perf_opt_chain_end)
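
Both bypasses in this file jump over the same construct: an optional rmnet_perf handler published through an RCU-protected function pointer and sampled under rcu_read_lock(). Stripped of driver detail, the pattern is roughly this (a sketch; the hook name, typedef, and wrapper are illustrative, only the rcu_dereference() usage mirrors the hunks above):

	#include <linux/rcupdate.h>

	struct rmnet_frag_descriptor;
	struct rmnet_port;

	typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *fd,
					       struct rmnet_port *port);
	static rmnet_perf_desc_hook_t __rcu rmnet_perf_hook; /* illustrative */

	static void rmnet_run_perf_hook(struct rmnet_frag_descriptor *fd,
					struct rmnet_port *port)
	{
		rmnet_perf_desc_hook_t fn;

		/* Sample the hook under RCU so the perf module can be
		 * unloaded safely; LL packets branch around this call.
		 */
		rcu_read_lock();
		fn = rcu_dereference(rmnet_perf_hook);
		if (fn)
			fn(fd, port);
		rcu_read_unlock();
	}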

core/rmnet_handlers.c (+6 -1)

@@ -297,6 +297,9 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
 		return;
 	}
 
+	if (skb->priority == 0xda1a)
+		goto no_perf;
+
 	/* Pass off handling to rmnet_perf module, if present */
 	rcu_read_lock();
 	rmnet_perf_core_deaggregate = rcu_dereference(rmnet_perf_deag_entry);
@@ -307,6 +310,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
 	}
 	rcu_read_unlock();
 
+no_perf:
 	/* Deaggregation and freeing of HW originating
 	 * buffers is done within here
 	 */
@@ -445,7 +449,8 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 
 		rcu_read_lock();
 		rmnet_core_shs_switch = rcu_dereference(rmnet_shs_switch);
-		if (rmnet_core_shs_switch && !skb->cb[1]) {
+		if (rmnet_core_shs_switch && !skb->cb[1] &&
+		    skb->priority != 0xda1a) {
 			skb->cb[1] = 1;
 			rmnet_core_shs_switch(skb, &port->phy_shs_cfg);
 			rcu_read_unlock();
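
The amended guard in rmnet_rx_handler() reads as a predicate: hand a packet to the SHS steering switch only once (cb[1] doubles as a "seen" flag) and never for LL traffic, so LL packets keep the RX queue the LL channel recorded. The same predicate in isolation (a sketch; the helper name is illustrative):

	#include <linux/skbuff.h>

	/* Steer via rmnet_shs only if this skb has not already been handed
	 * over (cb[1] is used as a flag) and it is not an LL packet.
	 */
	static bool rmnet_should_shs_switch(const struct sk_buff *skb)
	{
		return !skb->cb[1] && skb->priority != 0xda1a;
	}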

core/rmnet_ll_ipa.c (+1 -0)

@@ -31,6 +31,7 @@ static void rmnet_ll_ipa_rx(void *arg, void *rx_data)
 	/* Odds are IPA does this, but just to be safe */
 	skb->dev = ll_ep->phys_dev;
 	skb->protocol = htons(ETH_P_MAP);
+	skb_record_rx_queue(skb, 1);
 
 	tmp = skb;
 	while (tmp) {
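
skb_record_rx_queue() is the stock helper from include/linux/skbuff.h; it stores the queue index offset by one so that zero can mean "not recorded":

	/* Upstream definition (include/linux/skbuff.h) */
	static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
	{
		skb->queue_mapping = rx_queue + 1;
	}

With LL packets stamped as RX queue 1, receive packet steering (RPS) consults the rps_cpus mask of queues/rx-1 on the physical device instead of rx-0, which is what lets LL rmnet processing land on a different set of CPU cores, provided the device exposes a second RX queue.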