Fastforwarding dataipa code to data-kernel.lnx.1.1

Change-Id: Ia444a39ed3b08abdacccd27a24685cd6ddcfc995
Arnav Sharma, 4 years ago
parent commit 0701cc2fb8

+ 27 - 3
drivers/platform/msm/ipa/ipa_v3/ipa.c

@@ -6574,6 +6574,12 @@ static inline void ipa3_enable_napi_netdev(void)
 	}
 }
 
+static inline void ipa3_disable_napi_netdev(void)
+{
+	if (ipa3_ctx->lan_rx_napi_enable)
+		netif_napi_del(&ipa3_ctx->napi_lan_rx);
+}
+
 static u32 get_tx_wrapper_cache_size(u32 cache_size)
 {
 	if (cache_size <= IPA_TX_WRAPPER_CACHE_MAX_THRESHOLD)
@@ -7148,7 +7154,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		if (result) {
 			IPADBG("Error: ODL init fialed\n");
 			result = -ENODEV;
-			goto fail_cdev_add;
+			goto fail_odl_init;
 		}
 	}
 
@@ -7164,14 +7170,32 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	/* Create the dummy netdev for LAN RX NAPI*/
 	ipa3_enable_napi_netdev();
 
-	ipa3_wwan_init();
+	result = ipa3_wwan_init();
+	if (result) {
+		IPAERR(":ipa3_wwan_init err=%d\n", -result);
+		result = -ENODEV;
+		goto fail_wwan_init;
+	}
 
-	ipa3_rmnet_ctl_init();
+	result = ipa3_rmnet_ctl_init();
+	if (result) {
+		IPAERR(":ipa3_rmnet_ctl_init err=%d\n", -result);
+		result = -ENODEV;
+		goto fail_rmnet_ctl_init;
+	}
 
 	mutex_init(&ipa3_ctx->app_clock_vote.mutex);
 
 	return 0;
 
+fail_rmnet_ctl_init:
+	ipa3_wwan_cleanup();
+fail_wwan_init:
+	ipa3_disable_napi_netdev();
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_1)
+		ipa_odl_cleanup();
+fail_odl_init:
+	cdev_del(cdev);
 fail_cdev_add:
 fail_gsi_pre_fw_load_init:
 	ipa3_dma_shutdown();
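
The ipa3_pre_init() hunk above turns two previously unchecked calls into staged initialization with error propagation, and extends the unwind path so every stage that succeeded is torn down in reverse order before the error is returned. A minimal standalone C sketch of that staged-init / reverse-unwind pattern follows; the init_*/cleanup_* names are illustrative, not the driver's APIs.

#include <stdio.h>
#include <stdlib.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return 0; }
static int init_c(void) { puts("init c"); return -1; } /* simulate failure */
static void cleanup_b(void) { puts("cleanup b"); }
static void cleanup_a(void) { puts("cleanup a"); }

static int pre_init(void)
{
	int ret;

	ret = init_a();
	if (ret)
		goto fail_a;
	ret = init_b();
	if (ret)
		goto fail_b;
	ret = init_c();
	if (ret)
		goto fail_c;
	return 0;

fail_c:		/* undo everything that succeeded, newest first */
	cleanup_b();
fail_b:
	cleanup_a();
fail_a:
	return ret;
}

int main(void)
{
	return pre_init() ? EXIT_FAILURE : EXIT_SUCCESS;
}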

+ 2 - 2
drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c

@@ -1545,9 +1545,9 @@ static ssize_t ipa3_read_ntn(struct file *file, char __user *ubuf,
 		size_t count, loff_t *ppos)
 {
 #define TX_STATS(y) \
-	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+	stats.tx_ch_stats[0].y
 #define RX_STATS(y) \
-	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+	stats.rx_ch_stats[0].y
 
 	struct Ipa3HwStatsNTNInfoData_t stats;
 	int nbytes;
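
The ipa_debugfs.c hunk repoints the TX_STATS/RX_STATS macros from the uC shared-memory (MMIO) pointer to the local stats structure declared just below them, so the read formats one consistent snapshot instead of dereferencing the mapped region on every field access. A userspace sketch of the snapshot-plus-macro idea, assuming illustrative struct and field names:

#include <stdio.h>
#include <string.h>

struct ch_stats { unsigned int num_pkts; unsigned int num_bytes; };
struct hw_stats {
	struct ch_stats tx_ch_stats[1];
	struct ch_stats rx_ch_stats[1];
};

/* stands in for the uC shared-memory region */
static struct hw_stats shared_mmio = {
	.tx_ch_stats = { { 10, 1500 } },
	.rx_ch_stats = { { 7, 900 } },
};

#define TX_STATS(y) stats.tx_ch_stats[0].y
#define RX_STATS(y) stats.rx_ch_stats[0].y

int main(void)
{
	struct hw_stats stats;

	/* take one consistent copy, then format from the copy */
	memcpy(&stats, &shared_mmio, sizeof(stats));
	printf("TX pkts=%u bytes=%u\n", TX_STATS(num_pkts), TX_STATS(num_bytes));
	printf("RX pkts=%u bytes=%u\n", RX_STATS(num_pkts), RX_STATS(num_bytes));
	return 0;
}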

+ 4 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_dp.c

@@ -534,6 +534,8 @@ int ipa3_send(struct ipa3_sys_context *sys,
 					GSI_XFER_FLAG_EOT;
 				gsi_xfer[i].flags |=
 					GSI_XFER_FLAG_BEI;
+				hrtimer_try_to_cancel(&sys->db_timer);
+				sys->nop_pending = false;
 			} else {
 				send_nop = true;
 			}
@@ -1179,7 +1181,8 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 		tasklet_init(&ep->sys->tasklet, ipa3_tasklet_rx_notify,
 				(unsigned long) ep->sys);
 
-	if (ipa3_ctx->tx_napi_enable) {
+	if (IPA_CLIENT_IS_PROD(ep->client) &&
+		ipa3_ctx->tx_napi_enable) {
 		if (sys_in->client != IPA_CLIENT_APPS_WAN_PROD) {
 			netif_tx_napi_add(&ipa3_ctx->generic_ndev,
 			&ep->sys->napi_tx, ipa3_aux_poll_tx_complete,
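
The first ipa3_send() hunk cancels the doorbell timer and clears the pending-NOP flag when the last transfer already carries an EOT, since that EOT rings the doorbell and the deferred NOP becomes redundant; the second hunk restricts TX NAPI registration to producer endpoints. Below is a small userspace sketch of the "drop the deferred NOP once an EOT is posted" idea, with an illustrative flag standing in for the hrtimer:

#include <stdbool.h>
#include <stdio.h>

struct sys_ctx {
	bool timer_armed;   /* stands in for the doorbell hrtimer */
	bool nop_pending;
};

static void cancel_nop_timer(struct sys_ctx *sys)
{
	sys->timer_armed = false;   /* hrtimer_try_to_cancel() analogue */
}

static void send_last_desc(struct sys_ctx *sys, bool eot)
{
	if (eot) {
		/* the EOT itself rings the doorbell, so the deferred
		 * NOP and its timer are no longer needed */
		cancel_nop_timer(sys);
		sys->nop_pending = false;
	} else {
		sys->nop_pending = true;   /* defer the doorbell to a NOP */
	}
}

int main(void)
{
	struct sys_ctx sys = { .timer_armed = true, .nop_pending = true };

	send_last_desc(&sys, true);
	printf("timer_armed=%d nop_pending=%d\n", sys.timer_armed, sys.nop_pending);
	return 0;
}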

+ 86 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c

@@ -2235,6 +2235,81 @@ static ssize_t ipa_debugfs_print_drop_stats(struct file *file,
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
 }
 
+static ssize_t ipa_debugfs_enable_disable_drop_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	unsigned int pipe_num = 0;
+	bool enable_pipe = true;
+	u32 pipe_bitmask = ipa3_ctx->hw_stats.drop.init.enabled_bitmask;
+	char seprator = ',';
+	int i, j;
+	bool is_pipe = false;
+	ssize_t ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (sizeof(dbg_buff) < count + 1) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing) {
+		ret = -EFAULT;
+		goto bail;
+	}
+	dbg_buff[count] = '\0';
+	IPADBG("data is %s", dbg_buff);
+
+	i = 0;
+	while (dbg_buff[i] != ' ' && i < count)
+		i++;
+	j = i;
+	i++;
+	if (i < count) {
+		if (dbg_buff[i] == '0') {
+			enable_pipe = false;
+			IPADBG("Drop stats will be disabled for pipes:");
+		}
+	}
+
+	for (i = 0; i < j; i++) {
+		if (dbg_buff[i] >= '0' && dbg_buff[i] <= '9') {
+			pipe_num = (pipe_num * 10) + (dbg_buff[i] - '0');
+			is_pipe = true;
+		}
+		if (dbg_buff[i] == seprator) {
+			if (pipe_num >= 0 && pipe_num < ipa3_ctx->ipa_num_pipes
+				&& ipa3_get_client_by_pipe(pipe_num) <
+				IPA_CLIENT_MAX) {
+				IPADBG("pipe number %u\n", pipe_num);
+				if (enable_pipe)
+					pipe_bitmask = pipe_bitmask |
+							(1 << pipe_num);
+				else
+					pipe_bitmask = pipe_bitmask &
+							(~(1 << pipe_num));
+			}
+			pipe_num = 0;
+			is_pipe = false;
+		}
+	}
+	if (is_pipe && pipe_num >= 0 && pipe_num < ipa3_ctx->ipa_num_pipes &&
+		ipa3_get_client_by_pipe(pipe_num) < IPA_CLIENT_MAX) {
+		IPADBG("pipe number %u\n", pipe_num);
+		if (enable_pipe)
+			pipe_bitmask = pipe_bitmask | (1 << pipe_num);
+		else
+			pipe_bitmask = pipe_bitmask & (~(1 << pipe_num));
+	}
+
+	ipa_init_drop_stats(pipe_bitmask);
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
 static const struct file_operations ipa3_quota_ops = {
 	.read = ipa_debugfs_print_quota_stats,
 	.write = ipa_debugfs_reset_quota_stats,
@@ -2255,10 +2330,14 @@ static const struct file_operations ipa3_drop_ops = {
 	.write = ipa_debugfs_reset_drop_stats,
 };
 
+static const struct file_operations ipa3_enable_drop_ops = {
+	.write = ipa_debugfs_enable_disable_drop_stats,
+};
 
 int ipa_debugfs_init_stats(struct dentry *parent)
 {
 	const mode_t read_write_mode = 0664;
+	const mode_t write_mode = 0220;
 	struct dentry *file;
 	struct dentry *dent;
 
@@ -2285,6 +2364,13 @@ int ipa_debugfs_init_stats(struct dentry *parent)
 		goto fail;
 	}
 
+	file = debugfs_create_file("enable_drop_stats", write_mode, dent, NULL,
+		&ipa3_enable_drop_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file %s\n", "enable_drop_stats");
+		goto fail;
+	}
+
 	file = debugfs_create_file("tethering", read_write_mode, dent, NULL,
 		&ipa3_tethering_ops);
 	if (IS_ERR_OR_NULL(file)) {
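
The new enable_drop_stats debugfs write handler parses input of the form "<pipe,pipe,...> <0|1>": a comma-separated pipe list, a space, then 1 to enable or 0 to disable drop statistics for those pipes in the HW-stats bitmask. A standalone C sketch of that parsing, assuming an illustrative pipe-count limit in place of ipa_num_pipes and the client-validity check:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NUM_PIPES 32

static unsigned int apply_pipe_list(unsigned int bitmask, const char *buf)
{
	unsigned int pipe = 0;
	bool have_pipe = false;
	bool enable = true;
	const char *space = strchr(buf, ' ');
	size_t list_len = space ? (size_t)(space - buf) : strlen(buf);
	size_t i;

	if (space && space[1] == '0')
		enable = false;

	for (i = 0; i <= list_len; i++) {
		char c = (i < list_len) ? buf[i] : ',';   /* flush the last number */

		if (c >= '0' && c <= '9') {
			pipe = pipe * 10 + (c - '0');
			have_pipe = true;
		} else if (c == ',') {
			if (have_pipe && pipe < NUM_PIPES) {
				if (enable)
					bitmask |= 1u << pipe;
				else
					bitmask &= ~(1u << pipe);
			}
			pipe = 0;
			have_pipe = false;
		}
	}
	return bitmask;
}

int main(void)
{
	unsigned int mask = apply_pipe_list(0, "1,3,10 1");

	printf("after enable:  0x%x\n", mask);   /* 0x40a */
	mask = apply_pipe_list(mask, "3 0");
	printf("after disable: 0x%x\n", mask);   /* 0x402 */
	return 0;
}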

+ 20 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_odl.c

@@ -771,6 +771,26 @@ fail_mem_ctx:
 	return result;
 }
 
+void ipa_odl_cleanup(void)
+{
+	struct ipa3_odl_char_device_context *odl_cdev;
+
+	if (!ipa3_odl_ctx)
+		return;
+
+	odl_cdev = ipa3_odl_ctx->odl_cdev;
+
+	ipa_pm_deregister(ipa3_odl_ctx->odl_pm_hdl);
+	device_destroy(odl_cdev[1].class, odl_cdev[1].dev_num);
+	unregister_chrdev_region(odl_cdev[1].dev_num, 1);
+	class_destroy(odl_cdev[1].class);
+	device_destroy(odl_cdev[0].class, odl_cdev[0].dev_num);
+	unregister_chrdev_region(odl_cdev[0].dev_num, 1);
+	class_destroy(odl_cdev[0].class);
+	kfree(ipa3_odl_ctx);
+	ipa3_odl_ctx = NULL;
+}
+
 bool ipa3_is_odl_connected(void)
 {
 	return ipa3_odl_ctx->odl_state.odl_connected;

+ 1 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_odl.h

@@ -69,6 +69,7 @@ struct ipa3_push_msg_odl {
 extern struct ipa_odl_context *ipa3_odl_ctx;
 
 int ipa_odl_init(void);
+void ipa_odl_cleanup(void);
 void ipa3_odl_pipe_cleanup(bool is_ssr);
 int ipa3_odl_pipe_open(void);
 

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c

@@ -439,7 +439,7 @@ static int ipa3_check_qmi_response(int rc,
 			"Timeout for qmi request id %d\n", req_id);
 			return rc;
 		}
-		if ((rc == -ENETRESET) || (rc == -ENODEV)) {
+		if ((rc == -ENETRESET) || (rc == -ENODEV) || (rc == -ECONNRESET)) {
 			IPAWANERR(
 			"SSR while waiting for qmi request id %d\n", req_id);
 			return rc;

+ 3 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018 - 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018 - 2020, The Linux Foundation. All rights reserved.
  */
 
 #include "ipa_i.h"
@@ -705,6 +705,7 @@ int ipa3_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 		IPAERR("failed to release gsi channel: %d\n", result);
 		goto exit;
 	}
+	ipa3_release_wdi3_gsi_smmu_mappings(IPA_WDI3_TX_DIR);
 
 	memset(ep_tx, 0, sizeof(struct ipa3_ep_context));
 	IPADBG("tx client (ep: %d) disconnected\n", ipa_ep_idx_tx);
@@ -725,6 +726,7 @@ int ipa3_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 		IPAERR("failed to release gsi channel: %d\n", result);
 		goto exit;
 	}
+	ipa3_release_wdi3_gsi_smmu_mappings(IPA_WDI3_RX_DIR);
 
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
 		ipa3_uc_debug_stats_dealloc(IPA_HW_PROTOCOL_WDI3);

+ 7 - 7
drivers/platform/msm/ipa/ipa_v3/rmnet_ctl_ipa.c

@@ -383,6 +383,7 @@ void ipa3_rmnet_ctl_ready_notifier(void)
 int ipa3_rmnet_ctl_xmit(struct sk_buff *skb)
 {
 	int ret;
+	int len;
 	unsigned long flags;
 
 	if (!ipa3_ctx->rmnet_ctl_enable) {
@@ -451,6 +452,7 @@ int ipa3_rmnet_ctl_xmit(struct sk_buff *skb)
 	}
 	spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
 
+	len = skb->len;
 	/*
 	 * both data packets and command will be routed to
 	 * IPA_CLIENT_Q6_WAN_CONS based on DMA settings
@@ -478,8 +480,7 @@ int ipa3_rmnet_ctl_xmit(struct sk_buff *skb)
 	spin_lock_irqsave(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
 	atomic_inc(&rmnet_ctl_ipa3_ctx->stats.outstanding_pkts);
 	rmnet_ctl_ipa3_ctx->stats.tx_pkt_sent++;
-	rmnet_ctl_ipa3_ctx->stats.tx_byte_sent +=
-		skb->len;
+	rmnet_ctl_ipa3_ctx->stats.tx_byte_sent += len;
 	ret = 0;
 
 out:
@@ -496,6 +497,7 @@ static void rmnet_ctl_wakeup_ipa(struct work_struct *work)
 	int ret;
 	unsigned long flags;
 	struct sk_buff *skb;
+	int len;
 
 	/* calling from WQ */
 	ret = ipa_pm_activate_sync(rmnet_ctl_ipa3_ctx->rmnet_ctl_pm_hdl);
@@ -512,6 +514,7 @@ static void rmnet_ctl_wakeup_ipa(struct work_struct *work)
 	/* dequeue the skb */
 	while (skb_queue_len(&rmnet_ctl_ipa3_ctx->tx_queue) > 0) {
 		skb = skb_dequeue(&rmnet_ctl_ipa3_ctx->tx_queue);
+		len = skb->len;
 		spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
 		/*
 		 * both data packets and command will be routed to
@@ -527,8 +530,6 @@ static void rmnet_ctl_wakeup_ipa(struct work_struct *work)
 				rmnet_ctl_ipa3_ctx->stats.tx_pkt_dropped++;
 				rmnet_ctl_ipa3_ctx->stats.tx_byte_dropped +=
 					skb->len;
-				spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock,
-					flags);
 				kfree_skb(skb);
 				continue;
 			}
@@ -541,10 +542,9 @@ static void rmnet_ctl_wakeup_ipa(struct work_struct *work)
 		atomic_inc(&rmnet_ctl_ipa3_ctx->stats.outstanding_pkts);
 		spin_lock_irqsave(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
 		rmnet_ctl_ipa3_ctx->stats.tx_pkt_sent++;
-		rmnet_ctl_ipa3_ctx->stats.tx_byte_sent +=
-			skb->len;
-		spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
+		rmnet_ctl_ipa3_ctx->stats.tx_byte_sent += len;
 	}
+	spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
 	goto out;
 
 delayed_work:
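
The rmnet_ctl_ipa.c hunks cache skb->len in a local variable before the skb is handed to the send path and account tx_byte_sent from that cached value, because the skb can already have been consumed by the time the counters are updated. A minimal userspace sketch of caching the length before buffer ownership is transferred; the buffer type and send function are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct buf { size_t len; char *data; };

static long tx_bytes;

/* consumes the buffer: after this call it must not be touched */
static int send_buf(struct buf *b)
{
	free(b->data);
	free(b);
	return 0;
}

static int xmit(struct buf *b)
{
	size_t len = b->len;    /* cache before ownership is transferred */
	int ret = send_buf(b);  /* b may be gone from here on */

	if (ret == 0)
		tx_bytes += len;   /* account with the cached value */
	return ret;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	b->len = 128;
	b->data = malloc(b->len);
	xmit(b);
	printf("tx_bytes=%ld\n", tx_bytes);
	return 0;
}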