Revert "ipa: distribute non-hash flt table"

This reverts commit b06baeebf1993522e84076ff0b65d8715bc56762.
Ilia Lin committed 3 years ago
commit 885066266d

drivers/platform/msm/ipa/ipa_v3/ipa.c  (+45, -39)

@@ -7346,11 +7346,9 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 	struct gsi_per_props gsi_props;
 	struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
 	struct ipa3_flt_tbl *flt_tbl;
-	struct ipa3_flt_tbl_nhash_lcl *lcl_tbl;
 	int i;
 	struct idr *idr;
 	bool reg = false;
-	enum ipa_ip_type ip;
 
 	if (ipa3_ctx == NULL) {
 		IPADBG("IPA driver haven't initialized\n");
@@ -7406,18 +7404,18 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 		ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
 
 	IPADBG("ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
-		ipa3_ctx->rt_tbl_hash_lcl[IPA_IP_v4], ipa3_ctx->rt_tbl_nhash_lcl[IPA_IP_v4]);
+		ipa3_ctx->ip4_rt_tbl_hash_lcl, ipa3_ctx->ip4_rt_tbl_nhash_lcl);
 
 	IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
-		ipa3_ctx->rt_tbl_hash_lcl[IPA_IP_v6], ipa3_ctx->rt_tbl_nhash_lcl[IPA_IP_v6]);
+		ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);
 
 	IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n",
-		ipa3_ctx->flt_tbl_hash_lcl[IPA_IP_v4],
-		ipa3_ctx->flt_tbl_nhash_lcl[IPA_IP_v4]);
+		ipa3_ctx->ip4_flt_tbl_hash_lcl,
+		ipa3_ctx->ip4_flt_tbl_nhash_lcl);
 
 	IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n",
-		ipa3_ctx->flt_tbl_hash_lcl[IPA_IP_v6],
-		ipa3_ctx->flt_tbl_nhash_lcl[IPA_IP_v6]);
+		ipa3_ctx->ip6_flt_tbl_hash_lcl,
+		ipa3_ctx->ip6_flt_tbl_nhash_lcl);
 
 	if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) {
 		IPAERR("SW expect more core memory, needed %d, avail %d\n",
@@ -7484,43 +7482,51 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 	idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
 	idr_init(idr);
 
-	INIT_LIST_HEAD(&ipa3_ctx->flt_tbl_nhash_lcl_list[IPA_IP_v4]);
-	INIT_LIST_HEAD(&ipa3_ctx->flt_tbl_nhash_lcl_list[IPA_IP_v6]);
-
 	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
 		if (!ipa_is_ep_support_flt(i))
 			continue;
 
-		for (ip = IPA_IP_v4; ip < IPA_IP_MAX; ip++) {
-			flt_tbl = &ipa3_ctx->flt_tbl[i][ip];
-			INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
-			flt_tbl->in_sys[IPA_RULE_HASHABLE] = !ipa3_ctx->flt_tbl_hash_lcl[ip];
-
-			/*	For ETH client place Non-Hash FLT table in SRAM if allowed, for
-				all other EPs always place the table in DDR */
-			if (ipa3_ctx->flt_tbl_nhash_lcl[ip] &&
-			    (IPA_CLIENT_IS_ETH_PROD(i) ||
-			     ((ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_TEST) &&
-			      (i == ipa3_get_ep_mapping(IPA_CLIENT_TEST_PROD))))) {
-				flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] = false;
-				lcl_tbl = kcalloc(1, sizeof(struct ipa3_flt_tbl_nhash_lcl),
-						  GFP_KERNEL);
-				WARN_ON(lcl_tbl);
-				if (likely(lcl_tbl)) {
-					lcl_tbl->tbl = flt_tbl;
-					/* Add to the head of the list, to be pulled first */
-					list_add(&lcl_tbl->link,
-						 &ipa3_ctx->flt_tbl_nhash_lcl_list[ip]);
-				}
-			} else
-				flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] = true;
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+			!ipa3_ctx->ip4_flt_tbl_hash_lcl;
+
+		/*	For ETH client place Non-Hash FLT table in SRAM if allowed, for
+			all other EPs always place the table in DDR */
+		if (IPA_CLIENT_IS_ETH_PROD(i) ||
+			((ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_TEST) &&
+			(i == ipa3_get_ep_mapping(IPA_CLIENT_TEST_PROD))))
+			flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+			!ipa3_ctx->ip4_flt_tbl_nhash_lcl;
+		else
+			flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] = true;
 
-			/* Init force sys to false */
-			flt_tbl->force_sys[IPA_RULE_HASHABLE] = false;
-			flt_tbl->force_sys[IPA_RULE_NON_HASHABLE] = false;
+		/* Init force sys to false */
+		flt_tbl->force_sys[IPA_RULE_HASHABLE] = false;
+		flt_tbl->force_sys[IPA_RULE_NON_HASHABLE] = false;
 
-			flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[ip];
-		}
+		flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v4];
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+			!ipa3_ctx->ip6_flt_tbl_hash_lcl;
+
+		/*	For ETH client place Non-Hash FLT table in SRAM if allowed, for
+			all other EPs always place the table in DDR */
+		if (IPA_CLIENT_IS_ETH_PROD(i) ||
+			((ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_TEST) &&
+			(i == ipa3_get_ep_mapping(IPA_CLIENT_TEST_PROD))))
+			flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+			!ipa3_ctx->ip6_flt_tbl_nhash_lcl;
+		else
+			flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] = true;
+
+		/* Init force sys to false */
+		flt_tbl->force_sys[IPA_RULE_HASHABLE] = false;
+		flt_tbl->force_sys[IPA_RULE_NON_HASHABLE] = false;
+
+		flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v6];
 	}
 
 	if (!ipa3_ctx->apply_rg10_wa) {
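
The restored ipa3_post_init() code above decides the placement of the non-hashable FLT table per pipe and per IP family: Ethernet producer pipes (or the test producer when the HW runs in test mode) may keep the table in local SRAM if the family's nhash_lcl flag allows it; every other pipe always uses DDR. A minimal sketch of that rule, with plain booleans standing in for IPA_CLIENT_IS_ETH_PROD(), the IPA_HW_MODE_TEST check and the ip4/ip6_flt_tbl_nhash_lcl flags (not the driver's real interface):

#include <stdbool.h>

/*
 * Sketch of the restored per-pipe placement rule for the non-hashable
 * filter table (in_sys[IPA_RULE_NON_HASHABLE]).  Returns true when the
 * table lives in system (DDR) memory.  All parameters are illustrative
 * stand-ins for the driver's checks.
 */
static bool nhash_flt_tbl_in_sys(bool is_eth_prod, bool hw_test_mode,
				 bool is_test_prod, bool family_nhash_lcl)
{
	if (is_eth_prod || (hw_test_mode && is_test_prod))
		return !family_nhash_lcl;	/* SRAM only if the family flag allows it */
	return true;				/* every other pipe: DDR */
}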

drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c  (+57, -24)

@@ -1084,14 +1084,25 @@ static ssize_t ipa3_read_rt(struct file *file, char __user *ubuf, size_t count,
 
 	mutex_lock(&ipa3_ctx->lock);
 
-	if (ipa3_ctx->rt_tbl_hash_lcl[ip])
-		pr_err("Hashable table resides on local memory\n");
-	else
-		pr_err("Hashable table resides on system (ddr) memory\n");
-	if (ipa3_ctx->rt_tbl_nhash_lcl[ip])
-		pr_err("Non-Hashable table resides on local memory\n");
-	else
-		pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	if (ip ==  IPA_IP_v6) {
+		if (ipa3_ctx->ip6_rt_tbl_hash_lcl)
+			pr_err("Hashable table resides on local memory\n");
+		else
+			pr_err("Hashable table resides on system (ddr) memory\n");
+		if (ipa3_ctx->ip6_rt_tbl_nhash_lcl)
+			pr_err("Non-Hashable table resides on local memory\n");
+		else
+			pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	} else if (ip == IPA_IP_v4) {
+		if (ipa3_ctx->ip4_rt_tbl_hash_lcl)
+			pr_err("Hashable table resides on local memory\n");
+		else
+			pr_err("Hashable table resides on system (ddr) memory\n");
+		if (ipa3_ctx->ip4_rt_tbl_nhash_lcl)
+			pr_err("Non-Hashable table resides on local memory\n");
+		else
+			pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	}
 
 	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
 		i = 0;
@@ -1332,14 +1343,25 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
 
 	mutex_lock(&ipa3_ctx->lock);
 
-	if (ipa3_ctx->flt_tbl_hash_lcl[ip])
-		pr_err("Hashable table resides on local memory\n");
-	else
-		pr_err("Hashable table resides on system (ddr) memory\n");
-	if (ipa3_ctx->flt_tbl_nhash_lcl[ip])
-		pr_err("Non-Hashable table resides on local memory\n");
-	else
-		pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	if (ip == IPA_IP_v6) {
+		if (ipa3_ctx->ip6_flt_tbl_hash_lcl)
+			pr_err("Hashable table resides on local memory\n");
+		else
+			pr_err("Hashable table resides on system (ddr) memory\n");
+		if (ipa3_ctx->ip6_flt_tbl_nhash_lcl)
+			pr_err("Non-Hashable table resides on local memory\n");
+		else
+			pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	} else if (ip == IPA_IP_v4) {
+		if (ipa3_ctx->ip4_flt_tbl_hash_lcl)
+			pr_err("Hashable table resides on local memory\n");
+		else
+			pr_err("Hashable table resides on system (ddr) memory\n");
+		if (ipa3_ctx->ip4_flt_tbl_nhash_lcl)
+			pr_err("Non-Hashable table resides on local memory\n");
+		else
+			pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	}
 
 	for (j = 0; j < ipa3_ctx->ipa_num_pipes; j++) {
 		if (!ipa_is_ep_support_flt(j))
@@ -1429,14 +1451,25 @@ static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	mutex_lock(&ipa3_ctx->lock);
 
-	if (ipa3_ctx->flt_tbl_hash_lcl[ip])
-		pr_err("Hashable table resides on local memory\n");
-	else
-		pr_err("Hashable table resides on system (ddr) memory\n");
-	if (ipa3_ctx->flt_tbl_nhash_lcl[ip])
-		pr_err("Non-Hashable table resides on local memory\n");
-	else
-		pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	if (ip == IPA_IP_v6) {
+		if (ipa3_ctx->ip6_flt_tbl_hash_lcl)
+			pr_err("Hashable table resides on local memory\n");
+		else
+			pr_err("Hashable table resides on system (ddr) memory\n");
+		if (ipa3_ctx->ip6_flt_tbl_nhash_lcl)
+			pr_err("Non-Hashable table resides on local memory\n");
+		else
+			pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	} else if (ip == IPA_IP_v4) {
+		if (ipa3_ctx->ip4_flt_tbl_hash_lcl)
+			pr_err("Hashable table resides on local memory\n");
+		else
+			pr_err("Hashable table resides on system (ddr) memory\n");
+		if (ipa3_ctx->ip4_flt_tbl_nhash_lcl)
+			pr_err("Non-Hashable table resides on local memory\n");
+		else
+			pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	}
 
 	for (pipe = 0; pipe < ipa3_ctx->ipa_num_pipes; pipe++) {
 		if (!ipa_is_ep_support_flt(pipe))

drivers/platform/msm/ipa/ipa_v3/ipa_flt.c  (+98, -31)

@@ -111,6 +111,33 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
 	}
 }
 
+static void __ipa_reap_curr_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
+{
+	struct ipa3_flt_tbl *tbl;
+	struct ipa_mem_buffer buf = { 0 };
+	int i;
+
+	IPADBG_LOW("reaping current sys flt tbls ip=%d rlt=%d\n", ip, rlt);
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		tbl = &ipa3_ctx->flt_tbl[i][ip];
+		if (tbl->curr_mem[rlt].phys_base && tbl->prev_mem[rlt].phys_base) {
+			IPADBG_LOW("reaping flt tbl (curr) pipe=%d phys_addr: 0x%x\n",
+				i, tbl->curr_mem[rlt].phys_base);
+			ipahal_free_dma_mem(&tbl->curr_mem[rlt]);
+
+			IPADBG_LOW("moving prev flt tbl to curr pipe=%d phys_addr: 0x%x\n",
+				i, tbl->prev_mem[rlt].phys_base);
+			tbl->curr_mem[rlt] = tbl->prev_mem[rlt];
+			tbl->prev_mem[rlt] = buf;
+			tbl->prev_mem[rlt].phys_base = 0;
+		}
+	}
+}
+
 /**
  * ipa_prep_flt_tbl_for_cmt() - preparing the flt table for commit
  *  assign priorities to the rules, calculate their sizes and calculate
@@ -367,15 +394,19 @@ allocate_failed:
  * tbl bodies at the sram is enough for the commit
  * @ipt: the ip address family type
  * @rlt: the rule type (hashable or non-hashable)
- * @aligned_sz_lcl_tbls: calculated required aligned size
  *
  * Return: true if enough space available or false in other cases
  */
 static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
-	enum ipa_rule_type rlt, u32 aligned_sz_lcl_tbls)
+	enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
 {
 	u16 avail;
 
+	if (!bdy) {
+		IPAERR("Bad parameters, bdy = NULL\n");
+		return false;
+	}
+
 	if (ipt == IPA_IP_v4)
 		avail = (rlt == IPA_RULE_HASHABLE) ?
 			IPA_MEM_PART(apps_v4_flt_hash_size) :
@@ -385,11 +416,11 @@ static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
 			IPA_MEM_PART(apps_v6_flt_hash_size) :
 			IPA_MEM_PART(apps_v6_flt_nhash_size);
 
-	if (aligned_sz_lcl_tbls <= avail)
+	if (bdy->size <= avail)
 		return true;
 
 	IPADBG("tbl too big, needed %d avail %d ipt %d rlt %d\n",
-	       aligned_sz_lcl_tbls, avail, ipt, rlt);
+	       bdy->size, avail, ipt, rlt);
 	return false;
 }
 
@@ -493,9 +524,9 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
 	struct ipahal_reg_valmask valmask;
 	u32 tbl_hdr_width;
 	struct ipa3_flt_tbl *tbl;
-	struct ipa3_flt_tbl_nhash_lcl *lcl_tbl;
 	u16 entries;
 	struct ipahal_imm_cmd_register_write reg_write_coal_close;
+	bool prev_tbl_forced_sys = false;
 
 	tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
 	memset(&alloc_params, 0, sizeof(alloc_params));
@@ -513,8 +544,8 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
 			IPA_MEM_PART(apps_v4_flt_hash_ofst);
 		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
 			IPA_MEM_PART(apps_v4_flt_nhash_ofst);
-		lcl_hash = ipa3_ctx->flt_tbl_hash_lcl[IPA_IP_v4];
-		lcl_nhash = ipa3_ctx->flt_tbl_nhash_lcl[IPA_IP_v4];
+		lcl_hash = ipa3_ctx->ip4_flt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip4_flt_tbl_nhash_lcl;
 	} else {
 		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
 			IPA_MEM_PART(v6_flt_hash_ofst) +
@@ -526,8 +557,8 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
 			IPA_MEM_PART(apps_v6_flt_hash_ofst);
 		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
 			IPA_MEM_PART(apps_v6_flt_nhash_ofst);
-		lcl_hash = ipa3_ctx->flt_tbl_hash_lcl[IPA_IP_v6];
-		lcl_nhash = ipa3_ctx->flt_tbl_nhash_lcl[IPA_IP_v6];
+		lcl_hash = ipa3_ctx->ip6_flt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip6_flt_tbl_nhash_lcl;
 	}
 
 	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
@@ -538,6 +569,9 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
 			rc = -EPERM;
 			goto prep_failed;
 		}
+		/* Check if table was forced to sys previously */
+		if (tbl->force_sys[IPA_RULE_NON_HASHABLE])
+			prev_tbl_forced_sys = true;
 
 		/* First try fitting tables in lcl memory if allowed */
 		tbl->force_sys[IPA_RULE_NON_HASHABLE] = false;
@@ -559,37 +593,70 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
 		}
 	}
 
+	if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
+		IPAERR_RL("fail to generate FLT HW TBL image. IP %d\n", ip);
+		rc = -EFAULT;
+		goto prep_failed;
+	}
+
 	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
-		ipa_fltrt_get_aligned_lcl_bdy_size(alloc_params.total_sz_lcl_hash_tbls))) {
+		&alloc_params.hash_bdy)) {
 		IPAERR_RL("Hash filter table for IP:%d too big to fit in lcl memory\n",
 			ip);
 		rc = -EFAULT;
 		goto fail_size_valid;
 	}
 
-	/* Check Non-Hash filter tables fits in SRAM, if it is not - move some tables to DDR */
-	list_for_each_entry(lcl_tbl, &ipa3_ctx->flt_tbl_nhash_lcl_list[ip], link) {
-		if (ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
-			ipa_fltrt_get_aligned_lcl_bdy_size(alloc_params.total_sz_lcl_nhash_tbls)) ||
-			alloc_params.num_lcl_nhash_tbls == 0)
-			break;
-
-		IPADBG("SRAM partition is too small, move one non-hash table in DDR. "
-		       "IP:%d alloc_params.total_sz_lcl_nhash_tbls = %u\n",
-		       ip, alloc_params.total_sz_lcl_nhash_tbls);
-
-		/* Move lowest priority Eth client to DDR */
-		lcl_tbl->tbl->force_sys[IPA_RULE_NON_HASHABLE] = true;
+	/* Check Non-Hash filter table fits in SRAM, if it is not - move all
+	   tables to DDR */
+	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
+		&alloc_params.nhash_bdy)) {
+		IPADBG("SRAM partition is too small, place tables in DDR. IP:%d\n",
+			ip);
 
-		alloc_params.num_lcl_nhash_tbls--;
-		alloc_params.total_sz_lcl_nhash_tbls -= lcl_tbl->tbl->sz[IPA_RULE_NON_HASHABLE];
-		alloc_params.total_sz_lcl_nhash_tbls += tbl_hdr_width;
-	}
+		/* If no tables were forced to be in sys mem before, inc stats */
+		if (!prev_tbl_forced_sys)
+			ipa3_ctx->non_hash_flt_lcl_sys_switch++;
+
+		/* Reap previous generated tables before generating them again */
+		__ipa_reap_curr_sys_flt_tbls(ip, IPA_RULE_HASHABLE);
+		__ipa_reap_curr_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE);
+
+		/* Free & zero out the alloc_params before genereting them again */
+		if (alloc_params.hash_hdr.size)
+			ipahal_free_dma_mem(&alloc_params.hash_hdr);
+		ipahal_free_dma_mem(&alloc_params.nhash_hdr);
+		if (alloc_params.hash_bdy.size)
+			ipahal_free_dma_mem(&alloc_params.hash_bdy);
+		if (alloc_params.nhash_bdy.size)
+			ipahal_free_dma_mem(&alloc_params.nhash_bdy);
+
+		memset(&alloc_params, 0, sizeof(alloc_params));
+		alloc_params.ipt = ip;
+		alloc_params.tbls_num = ipa3_ctx->ep_flt_num;
+
+		for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+			if (!ipa_is_ep_support_flt(i))
+				continue;
+			tbl = &ipa3_ctx->flt_tbl[i][ip];
+			tbl->force_sys[IPA_RULE_NON_HASHABLE] = true;
+
+			if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) {
+				rc = -EPERM;
+				goto prep_failed;
+			}
+		}
 
-	if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
-		IPAERR_RL("fail to generate FLT HW TBL image. IP %d\n", ip);
-		rc = -EFAULT;
-		goto prep_failed;
+		/* Generate tables again with all nhash tables forced in system mem */
+		if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
+			IPAERR_RL("fail to generate FLT HW TBL image. IP %d\n", ip);
+			rc = -EFAULT;
+			goto prep_failed;
+		}
+	} else {
+		/* If tables were forced to be in sys mem before, inc stats */
+		if (prev_tbl_forced_sys)
+			ipa3_ctx->non_hash_flt_lcl_sys_switch++;
 	}
 
 	/* +4: 2 for bodies (hashable and non-hashable), 1 for flushing and 1
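
The reworked __ipa_commit_flt_v3() above is all-or-nothing: the HW table image is generated first, validated against the SRAM partition, and if the non-hashable body does not fit, the previously generated buffers and current system-memory tables are freed, every per-pipe table is forced to system memory and the image is regenerated; non_hash_flt_lcl_sys_switch is bumped whenever the placement flips. A reduced sketch of that decision, using plain sizes and flags rather than the driver's alloc_params and context state:

#include <stdbool.h>

/*
 * Reduced decision logic from the restored commit path: compare the
 * generated non-hash body against the SRAM partition and record whether
 * the placement flipped since the previous commit.  The parameters are
 * illustrative, not the driver's interface.
 */
struct nhash_commit_decision {
	bool force_all_sys;	/* regenerate with force_sys[NON_HASHABLE] = true */
	bool count_switch;	/* increment non_hash_flt_lcl_sys_switch */
};

static struct nhash_commit_decision
decide_nhash_commit(unsigned int nhash_bdy_size, unsigned int sram_avail,
		    bool prev_forced_sys)
{
	struct nhash_commit_decision d;

	d.force_all_sys = nhash_bdy_size > sram_avail;
	/* a switch is counted on both lcl->sys and sys->lcl transitions */
	d.count_switch = (d.force_all_sys != prev_forced_sys);
	return d;
}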

drivers/platform/msm/ipa/ipa_v3/ipa_i.h  (+9, -10)

@@ -906,11 +906,6 @@ struct ipa3_flt_tbl {
 	bool force_sys[IPA_RULE_TYPE_MAX];
 };
 
-struct ipa3_flt_tbl_nhash_lcl {
-	struct list_head link;
-	struct ipa3_flt_tbl *tbl;
-};
-
 /**
  * struct ipa3_rt_entry - IPA routing table entry
  * @link: entry's link in global routing table entries list
@@ -2157,11 +2152,14 @@ struct ipa3_context {
 	bool hdr_proc_ctx_tbl_lcl;
 	struct ipa_mem_buffer hdr_sys_mem;
 	struct ipa_mem_buffer hdr_proc_ctx_mem;
-	bool rt_tbl_hash_lcl[IPA_IP_MAX];
-	bool rt_tbl_nhash_lcl[IPA_IP_MAX];
-	bool flt_tbl_hash_lcl[IPA_IP_MAX];
-	bool flt_tbl_nhash_lcl[IPA_IP_MAX];
-	struct list_head flt_tbl_nhash_lcl_list[IPA_IP_MAX];
+	bool ip4_rt_tbl_hash_lcl;
+	bool ip4_rt_tbl_nhash_lcl;
+	bool ip6_rt_tbl_hash_lcl;
+	bool ip6_rt_tbl_nhash_lcl;
+	bool ip4_flt_tbl_hash_lcl;
+	bool ip4_flt_tbl_nhash_lcl;
+	bool ip6_flt_tbl_hash_lcl;
+	bool ip6_flt_tbl_nhash_lcl;
 	struct ipa3_active_clients ipa3_active_clients;
 	struct ipa3_active_clients_log_ctx ipa3_active_clients_logging;
 	struct workqueue_struct *power_mgmt_wq;
@@ -2332,6 +2330,7 @@ struct ipa3_context {
 	u16 ulso_ip_id_max;
 	bool use_pm_wrapper;
 	u8 page_poll_threshold;
+	u32 non_hash_flt_lcl_sys_switch;
 	bool wan_common_page_pool;
 	u64 gsi_msi_addr;
 	u64 gsi_msi_clear_addr;
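
The ipa_i.h hunk above is the heart of the revert: the boolean arrays indexed by enum ipa_ip_type are replaced by named per-family flags, and a non_hash_flt_lcl_sys_switch counter is added. Callers throughout this diff change accordingly, from array indexing to per-family branching; a stripped-down sketch of the two access patterns (the real struct ipa3_context carries many more fields):

#include <stdbool.h>

enum ipa_ip_type { IPA_IP_v4, IPA_IP_v6, IPA_IP_MAX };

/* Layout removed by the revert: one slot per IP family. */
struct ctx_arrays {
	bool flt_tbl_nhash_lcl[IPA_IP_MAX];
};

/* Layout restored by the revert: named per-family flags plus the new
 * counter of lcl<->sys placement switches. */
struct ctx_named {
	bool ip4_flt_tbl_nhash_lcl;
	bool ip6_flt_tbl_nhash_lcl;
	unsigned int non_hash_flt_lcl_sys_switch;	/* u32 in the driver */
};

/* Before the revert, callers simply indexed by family ... */
static inline bool nhash_lcl_arr(const struct ctx_arrays *c, enum ipa_ip_type ip)
{
	return c->flt_tbl_nhash_lcl[ip];
}

/* ... after it they branch per family, as the debugfs, flt and rt paths
 * in this diff now do. */
static inline bool nhash_lcl_named(const struct ctx_named *c, enum ipa_ip_type ip)
{
	return (ip == IPA_IP_v4) ? c->ip4_flt_tbl_nhash_lcl
				 : c->ip6_flt_tbl_nhash_lcl;
}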

drivers/platform/msm/ipa/ipa_v3/ipa_rt.c  (+10, -6)

@@ -498,8 +498,8 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
 			IPA_MEM_PART(apps_v4_rt_hash_ofst);
 		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
 			IPA_MEM_PART(apps_v4_rt_nhash_ofst);
-		lcl_hash = ipa3_ctx->rt_tbl_hash_lcl[IPA_IP_v4];
-		lcl_nhash = ipa3_ctx->rt_tbl_nhash_lcl[IPA_IP_v4];
+		lcl_hash = ipa3_ctx->ip4_rt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip4_rt_tbl_nhash_lcl;
 		alloc_params.tbls_num = IPA_MEM_PART(v4_apps_rt_index_hi) -
 			IPA_MEM_PART(v4_apps_rt_index_lo) + 1;
 	} else {
@@ -516,8 +516,8 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
 			IPA_MEM_PART(apps_v6_rt_hash_ofst);
 		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
 			IPA_MEM_PART(apps_v6_rt_nhash_ofst);
-		lcl_hash = ipa3_ctx->rt_tbl_hash_lcl[IPA_IP_v6];
-		lcl_nhash = ipa3_ctx->rt_tbl_nhash_lcl[IPA_IP_v6];
+		lcl_hash = ipa3_ctx->ip6_rt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip6_rt_tbl_nhash_lcl;
 		alloc_params.tbls_num = IPA_MEM_PART(v6_apps_rt_index_hi) -
 			IPA_MEM_PART(v6_apps_rt_index_lo) + 1;
 	}
@@ -888,8 +888,12 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
 		strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
 		entry->set = set;
 		entry->cookie = IPA_RT_TBL_COOKIE;
-		entry->in_sys[IPA_RULE_HASHABLE] = !ipa3_ctx->rt_tbl_hash_lcl[ip];
-		entry->in_sys[IPA_RULE_NON_HASHABLE] = !ipa3_ctx->rt_tbl_nhash_lcl[ip];
+		entry->in_sys[IPA_RULE_HASHABLE] = (ip == IPA_IP_v4) ?
+			!ipa3_ctx->ip4_rt_tbl_hash_lcl :
+			!ipa3_ctx->ip6_rt_tbl_hash_lcl;
+		entry->in_sys[IPA_RULE_NON_HASHABLE] = (ip == IPA_IP_v4) ?
+			!ipa3_ctx->ip4_rt_tbl_nhash_lcl :
+			!ipa3_ctx->ip6_rt_tbl_nhash_lcl;
 		set->tbl_cnt++;
 		entry->rule_ids = &set->rule_ids;
 		list_add(&entry->link, &set->head_rt_tbl_list);

drivers/platform/msm/ipa/ipa_v3/ipa_utils.c  (+10, -11)

@@ -6612,20 +6612,19 @@ void _ipa_sram_settings_read_v3_0(void)
 		ipa3_ctx->hdr_proc_ctx_tbl.start_offset =
 			IPA_MEM_PART(modem_hdr_proc_ctx_size);
 	}
-
-	ipa3_ctx->rt_tbl_hash_lcl[IPA_IP_v4] = false;
-	ipa3_ctx->rt_tbl_nhash_lcl[IPA_IP_v4] = false;
-	ipa3_ctx->rt_tbl_hash_lcl[IPA_IP_v6] = false;
-	ipa3_ctx->rt_tbl_nhash_lcl[IPA_IP_v6] = false;
-	ipa3_ctx->flt_tbl_hash_lcl[IPA_IP_v4] = false;
-	ipa3_ctx->flt_tbl_hash_lcl[IPA_IP_v6] = false;
+	ipa3_ctx->ip4_rt_tbl_hash_lcl =	false;
+	ipa3_ctx->ip4_rt_tbl_nhash_lcl = false;
+	ipa3_ctx->ip6_rt_tbl_hash_lcl = false;
+	ipa3_ctx->ip6_rt_tbl_nhash_lcl = false;
+	ipa3_ctx->ip4_flt_tbl_hash_lcl = false;
+	ipa3_ctx->ip6_flt_tbl_hash_lcl = false;
 
 	if (ipa3_ctx->ipa_hw_type == IPA_HW_v5_0) {
-		ipa3_ctx->flt_tbl_nhash_lcl[IPA_IP_v4] = true;
-		ipa3_ctx->flt_tbl_nhash_lcl[IPA_IP_v6] = true;
+		ipa3_ctx->ip4_flt_tbl_nhash_lcl = true;
+		ipa3_ctx->ip6_flt_tbl_nhash_lcl = true;
 	} else {
-		ipa3_ctx->flt_tbl_nhash_lcl[IPA_IP_v4] = false;
-		ipa3_ctx->flt_tbl_nhash_lcl[IPA_IP_v6] = false;
+		ipa3_ctx->ip4_flt_tbl_nhash_lcl = false;
+		ipa3_ctx->ip6_flt_tbl_nhash_lcl = false;
 	}
 }
 

drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c  (+20, -20)

@@ -4429,22 +4429,6 @@ nhash_alloc_fail:
 	return -ENOMEM;
 }
 
-u32 ipa_fltrt_get_aligned_lcl_bdy_size(u32 total_sz_lcl_tbls)
-{
-	u32 result = total_sz_lcl_tbls;
-	struct ipahal_fltrt_obj *obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
-
-	/* for table terminator */
-	result += obj->tbl_width * total_sz_lcl_tbls;
-	/* align the start of local rule-set */
-	result += obj->lcladdr_alignment * total_sz_lcl_tbls;
-	/* SRAM block size alignment */
-	result += obj->blk_sz_alignment;
-	result &= ~(obj->blk_sz_alignment);
-
-	return result;
-}
-
 /*
  * ipa_fltrt_alloc_lcl_bdy() - allocate and initialize buffers for
  *  local flt/rt tables bodies to be filled into sram
@@ -4475,8 +4459,16 @@ static int ipa_fltrt_alloc_lcl_bdy(
 	 *  and H/W local table start offset alignment
 	 */
 	if (params->total_sz_lcl_nhash_tbls + params->num_lcl_nhash_tbls > 0) {
-		params->nhash_bdy.size =
-			ipa_fltrt_get_aligned_lcl_bdy_size(params->total_sz_lcl_nhash_tbls);
+		params->nhash_bdy.size = params->total_sz_lcl_nhash_tbls;
+		/* for table terminator */
+		params->nhash_bdy.size += obj->tbl_width *
+			params->num_lcl_nhash_tbls;
+		/* align the start of local rule-set */
+		params->nhash_bdy.size += obj->lcladdr_alignment *
+			params->num_lcl_nhash_tbls;
+		/* SRAM block size alignment */
+		params->nhash_bdy.size += obj->blk_sz_alignment;
+		params->nhash_bdy.size &= ~(obj->blk_sz_alignment);
 
 		IPAHAL_DBG_LOW("nhash lcl tbl bdy total h/w size = %u\n",
 			params->nhash_bdy.size);
@@ -4502,8 +4494,16 @@ alloc1:
 	}
 
 	if (obj->support_hash && params->hash_bdy.size) {
-		params->hash_bdy.size = 
-			ipa_fltrt_get_aligned_lcl_bdy_size(params->total_sz_lcl_hash_tbls);
+		params->hash_bdy.size = params->total_sz_lcl_hash_tbls;
+		/* for table terminator */
+		params->hash_bdy.size += obj->tbl_width *
+			params->num_lcl_hash_tbls;
+		/* align the start of local rule-set */
+		params->hash_bdy.size += obj->lcladdr_alignment *
+			params->num_lcl_hash_tbls;
+		/* SRAM block size alignment */
+		params->hash_bdy.size += obj->blk_sz_alignment;
+		params->hash_bdy.size &= ~(obj->blk_sz_alignment);
 
 		IPAHAL_DBG_LOW("hash lcl tbl bdy total h/w size = %u\n",
 			params->hash_bdy.size);
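
The ipahal change above drops the shared ipa_fltrt_get_aligned_lcl_bdy_size() helper and re-inlines the body-size computation for the hash and non-hash cases. A worked sketch of the restored arithmetic, assuming (illustratively) that tbl_width is the per-table terminator size, lcladdr_alignment the per-table start-alignment slack, and blk_sz_alignment a block-size-minus-one mask, as the "+ mask, & ~mask" rounding in the diff implies; none of the values below are HW constants:

#include <stdio.h>

/*
 * Restored inline size computation: raw rule bytes, one terminator per
 * table, per-table start-alignment slack, then rounded up to the SRAM
 * block size.
 */
static unsigned int lcl_bdy_size(unsigned int total_sz, unsigned int num_tbls,
				 unsigned int tbl_width,
				 unsigned int lcladdr_alignment,
				 unsigned int blk_align_mask)
{
	unsigned int size = total_sz;

	size += tbl_width * num_tbls;		/* table terminators */
	size += lcladdr_alignment * num_tbls;	/* rule-set start alignment */
	size += blk_align_mask;			/* round up to the block size */
	size &= ~blk_align_mask;
	return size;
}

int main(void)
{
	/* 200 rule bytes in 3 tables, 8-byte terminators, 8-byte start
	 * alignment (slack 7), 128-byte SRAM blocks (mask 127):
	 * 200 + 24 + 21 = 245, rounded up to 256 */
	printf("%u\n", lcl_bdy_size(200, 3, 8, 7, 127));
	return 0;
}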

drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h  (+0, -7)

@@ -307,12 +307,5 @@ int ipahal_rt_parse_hw_rule(u8 *rule_addr,
 int ipahal_flt_parse_hw_rule(u8 *rule_addr,
 	struct ipahal_flt_rule_entry *rule);
 
-/*
- * ipa_fltrt_get_aligned_lcl_bdy_size() - Calculate real SRAM block aligned size
- *  required for flt table bodies
- * @total_sz_lcl_tbls: [in] The size in driver cashe
- */
-u32 ipa_fltrt_get_aligned_lcl_bdy_size(u32 total_sz_lcl_tbls);
-
 
 #endif /* _IPAHAL_FLTRT_H_ */