
rmnet_core: LLC flow control

Support flow control with IPA on the low latency channel. When
ipa_rmnet_ll_xmit() returns -EAGAIN, the channel is flow controlled:
queue outgoing packets on a pending list (capped at MAX_Q_LEN) instead
of dropping them, and drain the list from a tasklet once IPA delivers
the flow-enable event.

Change-Id: I1044abf3b8f59f928f9df3c29c50497b64b934a6
Signed-off-by: Weiyi Chen <[email protected]>
Weiyi Chen committed 3 years ago
parent commit: f5c5b86727
4 changed files with 103 additions and 3 deletions
  1. core/rmnet_ll.c (+2 -1)
  2. core/rmnet_ll.h (+6 -0)
  3. core/rmnet_ll_ipa.c (+89 -2)
  4. core/rmnet_vnd.c (+6 -0)
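
In short, the TX path now parks packets instead of failing them while
the channel is flow controlled, and a tasklet drains the backlog when
IPA re-enables flow. A condensed sketch of the scheme (illustrative
only; ll_tx_sketch is a placeholder name, while ipa_rmnet_ll_xmit(),
tx_pending_list, tx_pending_task, and the -EAGAIN convention all come
from the diff below):

/* TX side: try the hardware first, park the skb when flow is off */
static int ll_tx_sketch(struct sk_buff *skb)
{
	int rc;

	if (!skb_queue_empty(&tx_pending_list))
		goto queue_skb;		/* keep ordering behind the backlog */

	rc = ipa_rmnet_ll_xmit(skb);
	if (rc != -EAGAIN)
		return rc < 0 ? rc : 0;	/* sent, or a hard failure */

queue_skb:
	__skb_queue_tail(&tx_pending_list, skb);
	return 0;
}

/* Event side: IPA signals flow-enable, so drain from softirq context */
tasklet_schedule(&tx_pending_task);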

+ 2 - 1
core/rmnet_ll.c

@@ -1,4 +1,5 @@
 /* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,7 +24,7 @@
 
 static struct rmnet_ll_stats rmnet_ll_stats;
 /* For TX sync with DMA operations */
-static DEFINE_SPINLOCK(rmnet_ll_tx_lock);
+DEFINE_SPINLOCK(rmnet_ll_tx_lock);
 
 /* Client operations for respective underlying HW */
 extern struct rmnet_ll_client_ops rmnet_ll_client;
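
Dropping the static qualifier makes rmnet_ll_tx_lock visible to the
rest of the module; core/rmnet_ll_ipa.c declares it extern (below) so
that the normal TX path and the new drain tasklet serialize access to
the pending queue under a single lock. The sharing pattern in
isolation (bodies elided):

/* core/rmnet_ll.c: the one definition */
DEFINE_SPINLOCK(rmnet_ll_tx_lock);

/* core/rmnet_ll_ipa.c: a reference to the same lock */
extern spinlock_t rmnet_ll_tx_lock;

spin_lock_bh(&rmnet_ll_tx_lock);	/* _bh: the tasklet runs in softirq */
/* ...enqueue to / dequeue from tx_pending_list... */
spin_unlock_bh(&rmnet_ll_tx_lock);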

+ 6 - 0
core/rmnet_ll.h

@@ -1,4 +1,5 @@
 /* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -29,6 +30,11 @@ struct rmnet_ll_stats {
 		u64 rx_oom;
 		u64 rx_pkts;
 		u64 rx_tmp_allocs;
+		u64 tx_disabled;
+		u64 tx_enabled;
+		u64 tx_fc_queued;
+		u64 tx_fc_sent;
+		u64 tx_fc_err;
 };
 
 int rmnet_ll_send_skb(struct sk_buff *skb);
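
For reference, the increment sites for the five new counters, as they
appear later in this diff (the annotations summarize this change only):

u64 tx_disabled;	/* ipa_rmnet_ll_xmit() returned -EAGAIN: flow off */
u64 tx_enabled;		/* IPA delivered the flow-enable event: flow on */
u64 tx_fc_queued;	/* skb parked on tx_pending_list while flowed off */
u64 tx_fc_sent;		/* parked skb sent successfully by the drain tasklet */
u64 tx_fc_err;		/* parked skb failed with an error other than -EAGAIN */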

+ 89 - 2
core/rmnet_ll_ipa.c

@@ -1,4 +1,5 @@
 /* Copyright (c) 2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,17 +17,69 @@
 #include <linux/skbuff.h>
 #include <linux/ipa.h>
 #include <linux/if_ether.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
 #include "rmnet_ll.h"
 #include "rmnet_ll_core.h"
 
+#define IPA_RMNET_LL_RECEIVE 1
+#define IPA_RMNET_LL_FLOW_EVT 2
+
+#define MAX_Q_LEN 1000
+
 static struct rmnet_ll_endpoint *rmnet_ll_ipa_ep;
+static struct sk_buff_head tx_pending_list;
+extern spinlock_t rmnet_ll_tx_lock;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)
+static void rmnet_ll_ipa_tx_pending(unsigned long data);
+DECLARE_TASKLET(tx_pending_task, rmnet_ll_ipa_tx_pending, 0);
+static void rmnet_ll_ipa_tx_pending(unsigned long data)
+#else
+static void rmnet_ll_ipa_tx_pending(struct tasklet_struct *t);
+DECLARE_TASKLET(tx_pending_task, rmnet_ll_ipa_tx_pending);
+static void rmnet_ll_ipa_tx_pending(struct tasklet_struct *t)
+#endif
+{
+	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
+	struct sk_buff *skb;
+	int rc;
+
+	spin_lock_bh(&rmnet_ll_tx_lock);
+
+	while ((skb = __skb_dequeue(&tx_pending_list))) {
+		rc = ipa_rmnet_ll_xmit(skb);
+		if (rc == -EAGAIN) {
+			stats->tx_disabled++;
+			__skb_queue_head(&tx_pending_list, skb);
+			break;
+		}
+		if (rc >= 0)
+			stats->tx_fc_sent++;
+		else
+			stats->tx_fc_err++;
+	}
+
+	spin_unlock_bh(&rmnet_ll_tx_lock);
+}
 
 static void rmnet_ll_ipa_rx(void *arg, void *rx_data)
 {
-	struct rmnet_ll_endpoint *ll_ep = *((struct rmnet_ll_endpoint **)arg);
+	struct rmnet_ll_endpoint *ll_ep = rmnet_ll_ipa_ep;
 	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
 	struct sk_buff *skb, *tmp;
 
+	if (arg == (void *)(uintptr_t)(IPA_RMNET_LL_FLOW_EVT)) {
+		stats->tx_enabled++;
+		tasklet_schedule(&tx_pending_task);
+		return;
+	}
+
+	if (unlikely(arg != (void *)(uintptr_t)(IPA_RMNET_LL_RECEIVE))) {
+		pr_err("%s: invalid arg %lu\n", __func__, (unsigned long)arg);
+		return;
+	}
+
 	skb = rx_data;
 	/* Odds are IPA does this, but just to be safe */
 	skb->dev = ll_ep->phys_dev;
@@ -67,10 +120,16 @@ static void rmnet_ll_ipa_probe(void *arg)
 static void rmnet_ll_ipa_remove(void *arg)
 {
 	struct rmnet_ll_endpoint **ll_ep = arg;
+	struct sk_buff *skb;
 
 	dev_put((*ll_ep)->phys_dev);
 	kfree(*ll_ep);
 	*ll_ep = NULL;
+
+	spin_lock_bh(&rmnet_ll_tx_lock);
+	while ((skb = __skb_dequeue(&tx_pending_list)))
+		kfree_skb(skb);
+	spin_unlock_bh(&rmnet_ll_tx_lock);
 }
 
 static void rmnet_ll_ipa_ready(void * __unused)
@@ -90,17 +149,45 @@ static void rmnet_ll_ipa_ready(void * __unused)
 
 static int rmnet_ll_ipa_tx(struct sk_buff *skb)
 {
+	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
+	int rc;
+
 	if (!rmnet_ll_ipa_ep)
 		return -ENODEV;
 
+	if (!skb_queue_empty(&tx_pending_list))
+		goto queue_skb;
+
+	rc = ipa_rmnet_ll_xmit(skb);
+
+	/* rc >= 0: success; the value is the number of free descriptors left */
+	if (rc >= 0)
+		return 0;
+
 	/* IPA handles freeing the SKB on failure */
-	return ipa_rmnet_ll_xmit(skb);
+	if (rc != -EAGAIN)
+		return rc;
+
+	stats->tx_disabled++;
+
+queue_skb:
+	/* Flow controlled */
+	if (skb_queue_len(&tx_pending_list) >= MAX_Q_LEN) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	__skb_queue_tail(&tx_pending_list, skb);
+	stats->tx_fc_queued++;
+
+	return 0;
 }
 
 static int rmnet_ll_ipa_init(void)
 {
 	int rc;
 
+	__skb_queue_head_init(&tx_pending_list);
 	rc = ipa_register_ipa_ready_cb(rmnet_ll_ipa_ready, NULL);
 	if (rc == -EEXIST) {
 		/* IPA is already up. Call it ourselves, since they don't */
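
One detail worth noting above: the LINUX_VERSION_CODE guard around the
tasklet exists because the API changed in v5.9. The old
DECLARE_TASKLET(name, func, data) passed an opaque unsigned long to the
callback; the new DECLARE_TASKLET(name, callback) passes the tasklet
itself. A minimal rendering of the same compat pattern (demo_fn and
demo_task are placeholder names):

#include <linux/interrupt.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)
static void demo_fn(unsigned long data);	/* old signature */
DECLARE_TASKLET(demo_task, demo_fn, 0);
static void demo_fn(unsigned long data)
#else
static void demo_fn(struct tasklet_struct *t);	/* v5.9+ signature */
DECLARE_TASKLET(demo_task, demo_fn);
static void demo_fn(struct tasklet_struct *t)
#endif
{
	/* shared body; runs in softirq context after tasklet_schedule() */
}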

+ 6 - 0
core/rmnet_vnd.c

@@ -1,4 +1,5 @@
 /* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -520,6 +521,11 @@ static const char rmnet_ll_gstrings_stats[][ETH_GSTRING_LEN] = {
 	"LL RX OOM errors",
 	"LL RX packets",
 	"LL RX temp buffer allocations",
+	"LL TX disabled",
+	"LL TX enabled",
+	"LL TX FC queued",
+	"LL TX FC sent",
+	"LL TX FC err",
 };
 
 static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
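
These strings must stay in the same order as the u64 fields added to
struct rmnet_ll_stats, since ethtool pairs names with values purely by
position. The handlers themselves sit outside this hunk; the usual
shape, assuming the LL stats are copied out as a flat u64 array, looks
like this (the demo_* names are placeholders, not rmnet's actual
handlers):

static void demo_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, rmnet_ll_gstrings_stats,
		       sizeof(rmnet_ll_gstrings_stats));
}

static void demo_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct rmnet_ll_stats *ll = rmnet_ll_get_stats();

	/* field order matches string order, so a flat copy lines up */
	memcpy(data, ll, sizeof(*ll));
}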