
rmnet_core: Send LLC acks in workqueue context

LLC switch acks were previously allocated with GFP_ATOMIC and unicast directly from the caller's context. Move this to a dedicated ordered workqueue instead: ll_send_nl_ack() now only queues a small ll_ack_work item (under a spinlock that also guards against workqueue teardown), and the work function builds the netlink skb with GFP_KERNEL and sends it from process context. The workqueue is created on NLMSG_CLIENT_SETUP and destroyed on NLMSG_CLIENT_DELETE and on QMI exit.
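
For reference, below is a minimal, hypothetical receive-side sketch of how a userspace listener could decode this ack. The socket setup, the LL switch request itself, and the parse_ll_ack() helper name are illustrative assumptions, not part of this change; only the field mapping (bearer ID in nlmsg_type, status code in nlmsg_flags, current channel in nlmsg_seq of the embedded header) is taken from the code added here.

	#include <stdio.h>
	#include <linux/netlink.h>

	/*
	 * Decode one LL switch ack read from a NETLINK_ROUTE socket.
	 * The kernel sends it as NLMSG_ERROR with NLM_F_CAPPED and error == 0,
	 * echoing the switch result in the embedded message header.
	 */
	static int parse_ll_ack(const void *buf, unsigned int len)
	{
		const struct nlmsghdr *nlh = buf;
		const struct nlmsgerr *err;

		if (!NLMSG_OK(nlh, len) || nlh->nlmsg_type != NLMSG_ERROR)
			return -1;

		err = NLMSG_DATA(nlh);
		if (err->error)		/* non-zero means a real netlink error */
			return -1;

		printf("bearer %u: status %u, current channel %u\n",
		       err->msg.nlmsg_type,	/* bearer ID */
		       err->msg.nlmsg_flags,	/* ch_switch status code */
		       err->msg.nlmsg_seq);	/* channel currently in use */
		return 0;
	}
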

Change-Id: Ic162e6ad7575f9a6e73e5c96d7bc14c26c8ffe61
Acked-by: Weiyi Chen <[email protected]>
Signed-off-by: Subash Abhinov Kasiviswanathan <[email protected]>
Subash Abhinov Kasiviswanathan, 4 years ago
commit 0a575cfcbf
3 changed files with 87 additions and 12 deletions:
  1. core/qmi_rmnet.c (+3, -0)
  2. core/qmi_rmnet_i.h (+2, -1)
  3. core/rmnet_ll_qmap.c (+82, -11)

core/qmi_rmnet.c (+3, -0)

@@ -758,6 +758,7 @@ int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
 			qmi_rmnet_work_init(port);
 			rmnet_set_powersave_format(port);
 		}
+		rmnet_ll_wq_init();
 		break;
 	case NLMSG_CLIENT_DELETE:
 		if (!qmi)
@@ -772,6 +773,7 @@ int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
 			qmi_rmnet_work_exit(port);
 		}
 		qmi_rmnet_delete_client(port, qmi, tcm);
+		rmnet_ll_wq_exit();
 		break;
 	case NLMSG_SCALE_FACTOR:
 		if (!tcm->tcm_ifindex)
@@ -815,6 +817,7 @@ void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
 
 	wda_qmi_client_release(data);
 	qmi_rmnet_work_exit(port);
+	rmnet_ll_wq_exit();
 
 	if (data) {
 		wda_qmi_client_exit(data);

core/qmi_rmnet_i.h (+2, -1)

@@ -202,8 +202,9 @@ void qmi_rmnet_watchdog_add(struct rmnet_bearer_map *bearer);
 void qmi_rmnet_watchdog_remove(struct rmnet_bearer_map *bearer);
 
 int rmnet_ll_switch(struct net_device *dev, struct tcmsg *tcm, int attrlen);
-
 void rmnet_ll_guard_fn(struct timer_list *t);
+void rmnet_ll_wq_init(void);
+void rmnet_ll_wq_exit(void);
 #else
 static inline struct rmnet_flow_map *
 qmi_rmnet_get_flow_map(struct qos_info *qos_info,

core/rmnet_ll_qmap.c (+82, -11)

@@ -14,6 +14,7 @@
 #include <linux/netlink.h>
 #include <uapi/linux/rtnetlink.h>
 #include <linux/net.h>
+#include <linux/workqueue.h>
 #include <net/sock.h>
 #include "dfc.h"
 #include "rmnet_qmi.h"
@@ -78,33 +79,103 @@ struct qmap_ll_switch_status {
 	struct qmap_ll_bearer	bearer[0];
 } __aligned(1);
 
-static void ll_send_nl_ack(struct rmnet_bearer_map *bearer)
+/*
+ * LL workqueue
+ */
+static DEFINE_SPINLOCK(ll_wq_lock);
+static struct workqueue_struct *ll_wq;
+
+struct ll_ack_work {
+	struct work_struct work;
+	u32 nl_pid;
+	u32 nl_seq;
+	u8 bearer_id;
+	u8 status_code;
+	u8 current_ch;
+};
+
+static void ll_ack_fn(struct work_struct *work)
 {
+	struct ll_ack_work *ack_work;
 	struct sk_buff *skb;
 	struct nlmsghdr *nlh;
 	struct nlmsgerr *errmsg;
 	unsigned int flags = NLM_F_CAPPED;
 
-	if (!(bearer->ch_switch.flags & LL_MASK_NL_ACK))
-		return;
+	ack_work = container_of(work, struct ll_ack_work, work);
 
-	skb = nlmsg_new(sizeof(*errmsg), GFP_ATOMIC);
+	skb = nlmsg_new(sizeof(*errmsg), GFP_KERNEL);
 	if (!skb)
-		return;
+		goto out;
 
-	nlh = __nlmsg_put(skb, bearer->ch_switch.nl_pid,
-			  bearer->ch_switch.nl_seq, NLMSG_ERROR,
+	nlh = __nlmsg_put(skb, ack_work->nl_pid,
+			  ack_work->nl_seq, NLMSG_ERROR,
 			  sizeof(*errmsg), flags);
 	errmsg = nlmsg_data(nlh);
 	errmsg->error = 0;
-	errmsg->msg.nlmsg_type = bearer->bearer_id;
-	errmsg->msg.nlmsg_flags = bearer->ch_switch.status_code;
-	errmsg->msg.nlmsg_seq = bearer->ch_switch.current_ch;
+	errmsg->msg.nlmsg_len = sizeof(struct nlmsghdr);
+	errmsg->msg.nlmsg_type = ack_work->bearer_id;
+	errmsg->msg.nlmsg_flags = ack_work->status_code;
+	errmsg->msg.nlmsg_seq = ack_work->current_ch;
+	errmsg->msg.nlmsg_pid = ack_work->nl_pid;
 	nlmsg_end(skb, nlh);
 
-	rtnl_unicast(skb, &init_net, bearer->ch_switch.nl_pid);
+	rtnl_unicast(skb, &init_net, ack_work->nl_pid);
+out:
+	kfree(ack_work);
+}
+
+static void ll_send_nl_ack(struct rmnet_bearer_map *bearer)
+{
+	struct ll_ack_work *ack_work;
+
+	if (!(bearer->ch_switch.flags & LL_MASK_NL_ACK))
+		return;
+
+	ack_work = kzalloc(sizeof(*ack_work), GFP_ATOMIC);
+	if (!ack_work)
+		return;
+
+	ack_work->nl_pid = bearer->ch_switch.nl_pid;
+	ack_work->nl_seq = bearer->ch_switch.nl_seq;
+	ack_work->bearer_id = bearer->bearer_id;
+	ack_work->status_code = bearer->ch_switch.status_code;
+	ack_work->current_ch = bearer->ch_switch.current_ch;
+	INIT_WORK(&ack_work->work, ll_ack_fn);
+
+	spin_lock_bh(&ll_wq_lock);
+	if (ll_wq)
+		queue_work(ll_wq, &ack_work->work);
+	else
+		kfree(ack_work);
+	spin_unlock_bh(&ll_wq_lock);
+}
+
+void rmnet_ll_wq_init(void)
+{
+	WARN_ON(ll_wq);
+	ll_wq = alloc_ordered_workqueue("rmnet_ll_wq", 0);
+}
+
+void rmnet_ll_wq_exit(void)
+{
+	struct workqueue_struct *tmp = NULL;
+
+	spin_lock_bh(&ll_wq_lock);
+	if (ll_wq) {
+		tmp = ll_wq;
+		ll_wq = NULL;
+	}
+	spin_unlock_bh(&ll_wq_lock);
+
+	if (tmp)
+		destroy_workqueue(tmp);
 }
 
+/*
+ * LLC switch
+ */
+
 static void ll_qmap_maybe_set_ch(struct qos_info *qos,
 				 struct rmnet_bearer_map *bearer, u8 status)
 {