
Fastforwarding dataipa CRT:data-kernel.lnx.1.2-220128 to data-kernel.lnx.2.0

Kasiviswanathan, Subash Abhinov, 3 years ago
commit a8a418196b

+ 2 - 2
drivers/platform/msm/gsi/gsihal/gsihal_reg.c

@@ -1209,10 +1209,10 @@ static struct gsihal_reg_obj gsihal_reg_objs[GSI_VER_MAX][GSI_REG_MAX] = {
 	0x0001f180, 0x4000, 0},
 	[GSI_VER_1_0][GSI_EE_n_CNTXT_MSI_BASE_LSB] = {
 	gsireg_construct_dummy, gsireg_parse_dummy,
-	0x0001f180, 0x4000, 0},
+	0x0001f188, 0x4000, 0},
 	[GSI_VER_1_0][GSI_EE_n_CNTXT_MSI_BASE_MSB] = {
 	gsireg_construct_dummy, gsireg_parse_dummy,
-	0x0001f180, 0x4000, 0},
+	0x0001f18c, 0x4000, 0},
 	[GSI_VER_1_0][GSI_EE_n_GSI_STATUS] = {
 	gsireg_construct_dummy, gsireg_parse_gsi_status,
 	0x0001f000, 0x4000, 0},
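
The two corrected entries give GSI_EE_n_CNTXT_MSI_BASE_LSB and GSI_EE_n_CNTXT_MSI_BASE_MSB their own offsets (0x0001f188 and 0x0001f18c); previously both entries pointed at 0x0001f180. A minimal sketch of how such a table entry is consumed, assuming the first two numeric fields are the base offset and the per-EE stride (the field names below are hypothetical, not taken from gsihal):

/* Illustration only: deriving an EE-indexed register address from a
 * gsihal_reg_objs entry, assuming {offset, ee_stride} semantics.
 */
static inline u32 gsi_ee_reg_addr(u32 offset, u32 ee_stride, u32 ee)
{
	return offset + ee * ee_stride;
}

/* EE 0 after the fix: MSI_BASE_LSB -> 0x0001f188, MSI_BASE_MSB -> 0x0001f18c */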

+ 16 - 0
drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c

@@ -1380,6 +1380,11 @@ static int ipa_mhi_connect_pipe_internal(struct ipa_mhi_connect_params *in, u32
 
 	}
 
+	if (in->sys.client == IPA_CLIENT_MHI_LOW_LAT_PROD)
+		ipa3_update_mhi_ctrl_state(IPA_MHI_CTRL_UL_SETUP, true);
+	else if (in->sys.client == IPA_CLIENT_MHI_LOW_LAT_CONS)
+		ipa3_update_mhi_ctrl_state(IPA_MHI_CTRL_DL_SETUP, true);
+
 	mutex_unlock(&mhi_client_general_mutex);
 
 	if (!in->sys.keep_ipa_awake)
@@ -1436,6 +1441,10 @@ static int ipa_mhi_disconnect_pipe_internal(u32 clnt_hdl)
 		return -EINVAL;
 	}
 
+	if (client == IPA_CLIENT_MHI_LOW_LAT_PROD)
+		ipa3_update_mhi_ctrl_state(IPA_MHI_CTRL_UL_SETUP, false);
+	else if (client == IPA_CLIENT_MHI_LOW_LAT_CONS)
+		ipa3_update_mhi_ctrl_state(IPA_MHI_CTRL_DL_SETUP, false);
 	IPA_ACTIVE_CLIENTS_INC_EP(client);
 
 	res = ipa_mhi_reset_channel(channel, false);
@@ -2209,6 +2218,13 @@ static int ipa_mhi_register_pm(void)
 		goto fail_pm_cons;
 	}
 
+	res = ipa_pm_associate_ipa_cons_to_client(ipa_mhi_client_ctx->pm_hdl,
+		IPA_CLIENT_MHI_LOW_LAT_CONS);
+	if (res) {
+		IPA_MHI_ERR("fail to associate low_lat_cons with PM %d\n", res);
+		goto fail_pm_cons;
+	}
+
 	res = ipa_pm_set_throughput(ipa_mhi_client_ctx->pm_hdl, 1000);
 	if (res) {
 		IPA_MHI_ERR("fail to set perf profile to PM %d\n", res);

+ 231 - 160
drivers/platform/msm/ipa/ipa_v3/dump/ipa5.0/ipa_pkt_cntxt.h

@@ -6,178 +6,249 @@
 #define _IPA_PKT_CNTXT_H_
 
 #define IPA_HW_PKT_CTNTX_MAX        0x10
-#define IPA_HW_NUM_SAVE_PKT_CTNTX   0x8
-#define IPA_HW_PKT_CTNTX_START_ADDR 0xE434CA00
-#define IPA_HW_PKT_CTNTX_SIZE       (sizeof(ipa_pkt_ctntx_opcode_state_s) + \
-				     sizeof(ipa_pkt_ctntx_u))
 
 /*
  * Packet Context States
  */
 enum ipa_hw_pkt_cntxt_state_e {
 	IPA_HW_PKT_CNTXT_STATE_HFETCHER_INIT = 1,
-	IPA_HW_PKT_CNTXT_STATE_HFETCHER_DMAR,
-	IPA_HW_PKT_CNTXT_STATE_HFETCHER_DMAR_REP,
-	IPA_HW_PKT_CNTXT_STATE_H_DCPH,
-	IPA_HW_PKT_CNTXT_STATE_PKT_PARSER,
-	IPA_HW_PKT_CNTXT_STATE_FILTER_NAT,
-	IPA_HW_PKT_CNTXT_STATE_ROUTER,
-	IPA_HW_PKT_CNTXT_STATE_HDRI,
-	IPA_HW_PKT_CNTXT_STATE_UCP,
-	IPA_HW_PKT_CNTXT_STATE_ENQUEUER,
-	IPA_HW_PKT_CNTXT_STATE_DFETCHER,
-	IPA_HW_PKT_CNTXT_STATE_D_DCPH,
-	IPA_HW_PKT_CNTXT_STATE_DISPATCHER,
-	IPA_HW_PKT_CNTXT_STATE_TX,
-	IPA_HW_PKT_CNTXT_STATE_TX_ZLT,
-	IPA_HW_PKT_CNTXT_STATE_DFETCHER_DMAR,
-	IPA_HW_PKT_CNTXT_STATE_DCMP,
+	IPA_HW_PKT_CNTXT_STATE_HFETCHER_DMAR = 2,
+	IPA_HW_PKT_CNTXT_STATE_H_DCPH = 3,
+	IPA_HW_PKT_CNTXT_STATE_MULTI_DRBIP = 4,
+	IPA_HW_PKT_CNTXT_STATE_PKT_PARSER = 5,
+	IPA_HW_PKT_CNTXT_STATE_FILTER_NAT = 6,
+	IPA_HW_PKT_CNTXT_STATE_ROUTER = 7,
+	IPA_HW_PKT_CNTXT_STATE_HDRI = 8,
+	IPA_HW_PKT_CNTXT_STATE_UCP = 9,
+	IPA_HW_PKT_CNTXT_STATE_COAL_MASTER = 10,
+	IPA_HW_PKT_CNTXT_STATE_ENQUEUER = 11,
+	IPA_HW_PKT_CNTXT_STATE_DFETCHER = 12,
+	IPA_HW_PKT_CNTXT_STATE_D_DCPH = 13,
+	IPA_HW_PKT_CNTXT_STATE_DISPATCHER = 14,
+	IPA_HW_PKT_CNTXT_STATE_TX = 15,
+	IPA_HW_PKT_CNTXT_STATE_TX_ZLT = 16,
+	IPA_HW_PKT_CNTXT_STATE_DFETCHER_DMAR = 17,
+	IPA_HW_PKT_CNTXT_STATE_D_DCPH_2 = 19,
+	IPA_HW_PKT_CNTXT_STATE_TX_RSRCREL = 20,
 };
 
 /*
  * Packet Context fields as received from VI/Design
  */
 struct ipa_pkt_ctntx_s {
-	u64	opcode                           : 8;
-	u64	state                            : 5;
-	u64	not_used_1                       : 2;
-	u64	tx_pkt_dma_done                  : 1;
-	u64	exc_deagg                        : 1;
-	u64	exc_pkt_version                  : 1;
-	u64	exc_pkt_len                      : 1;
-	u64	exc_threshold                    : 1;
-	u64	exc_sw                           : 1;
-	u64	exc_nat                          : 1;
-	u64	exc_frag_miss                    : 1;
-	u64	filter_bypass                    : 1;
-	u64	router_bypass                    : 1;
-	u64	nat_bypass                       : 1;
-	u64	hdri_bypass                      : 1;
-	u64	dcph_bypass                      : 1;
-	u64	security_credentials_select      : 1;
-	u64	pkt_2nd_pass                     : 1;
-	u64	xlat_bypass                      : 1;
-	u64	dcph_valid                       : 1;
-	u64	ucp_on                           : 1;
-	u64	replication                      : 1;
-	u64	src_status_en                    : 1;
-	u64	dest_status_en                   : 1;
-	u64	frag_status_en                   : 1;
-	u64	eot_dest                         : 1;
-	u64	eot_notif                        : 1;
-	u64	prev_eot_dest                    : 1;
-	u64	src_hdr_len                      : 8;
-	u64	tx_valid_sectors                 : 8;
-	u64	rx_flags                         : 8;
-	u64	rx_packet_length                 : 16;
-	u64	revised_packet_length            : 16;
-	u64	frag_en                          : 1;
-	u64	frag_bypass                      : 1;
-	u64	frag_process                     : 1;
-	u64	notif_pipe                       : 5;
-	u64	src_id                           : 8;
-	u64	tx_pkt_transferred               : 1;
-	u64	src_pipe                         : 5;
-	u64	dest_pipe                        : 5;
-	u64	frag_pipe                        : 5;
-	u64	ihl_offset                       : 8;
-	u64	protocol                         : 8;
-	u64	tos                              : 8;
-	u64	id                               : 16;
-	u64	v6_reserved                      : 4;
-	u64	ff                               : 1;
-	u64	mf                               : 1;
-	u64	pkt_israg                        : 1;
-	u64	tx_holb_timer_overflow           : 1;
-	u64	tx_holb_timer_running            : 1;
-	u64	trnseq_0                         : 3;
-	u64	trnseq_1                         : 3;
-	u64	trnseq_2                         : 3;
-	u64	trnseq_3                         : 3;
-	u64	trnseq_4                         : 3;
-	u64	trnseq_ex_length                 : 8;
-	u64	trnseq_4_length                  : 8;
-	u64	trnseq_4_offset                  : 8;
-	u64	dps_tx_pop_cnt                   : 2;
-	u64	dps_tx_push_cnt                  : 2;
-	u64	vol_ic_dcph_cfg                  : 1;
-	u64	vol_ic_tag_stts                  : 1;
-	u64	vol_ic_pxkt_init_e               : 1;
-	u64	vol_ic_pkt_init                  : 1;
-	u64	tx_holb_counter                  : 32;
-	u64	trnseq_0_length                  : 8;
-	u64	trnseq_0_offset                  : 8;
-	u64	trnseq_1_length                  : 8;
-	u64	trnseq_1_offset                  : 8;
-	u64	trnseq_2_length                  : 8;
-	u64	trnseq_2_offset                  : 8;
-	u64	trnseq_3_length                  : 8;
-	u64	trnseq_3_offset                  : 8;
-	u64	dmar_valid_length                : 16;
-	u64	dcph_valid_length                : 16;
-	u64	frag_hdr_offset                  : 9;
-	u64	ip_payload_offset                : 9;
-	u64	frag_rule                        : 4;
-	u64	frag_table                       : 1;
-	u64	frag_hit                         : 1;
-	u64	data_cmdq_ptr                    : 8;
-	u64	filter_result                    : 6;
-	u64	router_result                    : 6;
-	u64	nat_result                       : 6;
-	u64	hdri_result                      : 6;
-	u64	dcph_result                      : 6;
-	u64	dcph_result_valid                : 1;
-	u32	not_used_2                       : 4;
-	u64	tx_pkt_suspended                 : 1;
-	u64	tx_pkt_dropped                   : 1;
-	u32	not_used_3                       : 3;
-	u64	metadata_valid                   : 1;
-	u64	metadata_type                    : 4;
-	u64	ul_cs_start_diff                 : 9;
-	u64	cs_disable_trlr_vld_bit          : 1;
-	u64	cs_required                      : 1;
-	u64	dest_hdr_len                     : 8;
-	u64	fr_l                             : 1;
-	u64	fl_h                             : 1;
-	u64	fr_g                             : 1;
-	u64	fr_ret                           : 1;
-	u64	fr_rule_id                       : 10;
-	u64	rt_l                             : 1;
-	u64	rt_h                             : 1;
-	u64	rtng_tbl_index                   : 5;
-	u64	rt_match                         : 1;
-	u64	rt_rule_id                       : 10;
-	u64	nat_tbl_index                    : 13;
-	u64	nat_type                         : 2;
-	u64	hdr_l                            : 1;
-	u64	header_offset                    : 10;
-	u64	not_used_4                       : 1;
-	u64	filter_result_valid              : 1;
-	u64	router_result_valid              : 1;
-	u64	nat_result_valid                 : 1;
-	u64	hdri_result_valid                : 1;
-	u64	not_used_5                       : 1;
-	u64	stream_id                        : 8;
-	u64	not_used_6                       : 6;
-	u64	dcph_context_index               : 2;
-	u64	dcph_cfg_size                    : 16;
-	u64	dcph_cfg_count                   : 32;
-	u64	tag_info                         : 48;
-	u64	ucp_cmd_id                       : 16;
-	u64	metadata                         : 32;
-	u64	ucp_cmd_params                   : 32;
-	u64	nat_ip_address                   : 32;
-	u64	nat_ip_cs_diff                   : 16;
-	u64	frag_dest_pipe                   : 5;
-	u64	frag_nat_type                    : 2;
-	u64	fragr_ret                        : 1;
-	u64	frag_protocol                    : 8;
-	u64	src_ip_address                   : 32;
-	u64	dest_ip_address                  : 32;
-	u64	not_used_7                       : 37;
-	u64	frag_hdr_l                       : 1;
-	u64	frag_header_offset               : 10;
-	u64	frag_id                          : 16;
+	u64	opcode                           : 8; /* Word 0 Bits 0-7 */
+	u64	state                            : 5; /* Word 0 Bits 8-12 */
+	u64	stats_disable                    : 1; /* Word 0 Bit 13 */
+	u64	exc_ucp                          : 1; /* Word 0 Bit 14 */
+	u64	tx_pkt_dma_done                  : 1; /* Word 0 Bit 15 */
+	u64	exc_deagg                        : 1; /* Word 0 Bit 16 */
+	u64	exc_pkt_version                  : 1; /* Word 0 Bit 17 */
+	u64	exc_pkt_len                      : 1; /* Word 0 Bit 18 */
+	u64	exc_threshold                    : 1; /* Word 0 Bit 19 */
+	u64	exc_sw                           : 1; /* Word 0 Bit 20 */
+	u64	exc_nat                          : 1; /* Word 0 Bit 21 */
+	u64	exc_frag_miss                    : 1; /* Word 0 Bit 22 */
+	u64	filter_bypass                    : 1; /* Word 0 Bit 23 */
+	u64	router_bypass                    : 1; /* Word 0 Bit 24 */
+	u64	nat_bypass                       : 1; /* Word 0 Bit 25 */
+	u64	hdri_bypass                      : 1; /* Word 0 Bit 26 */
+	u64	dcph_bypass                      : 1; /* Word 0 Bit 27 */
+	u64	security_credentials_select      : 1; /* Word 0 Bit 28 */
+	u64	dcph_valid                       : 1; /* Word 0 Bit 29 */
+	u64	round_bypass                     : 1; /* Word 0 Bit 30 */
+	u64	bearer_valid                     : 1; /* Word 0 Bit 31 */
+	u64	ucp_on                           : 1; /* Word 0 Bit 32 */
+	u64	replication                      : 1; /* Word 0 Bit 33 */
+	u64	src_status_en                    : 1; /* Word 0 Bit 34 */
+	u64	dest_status_en                   : 1; /* Word 0 Bit 35 */
+	u64	frag_status_en                   : 1; /* Word 0 Bit 36 */
+	u64	eot_dest                         : 1; /* Word 0 Bit 37 */
+	u64	eot_notif                        : 1; /* Word 0 Bit 38 */
+	u64	prev_eot_dest                    : 1; /* Word 0 Bit 39 */
+	u64	l2_len                           : 9; /* Word 0 Bits 40-48 */
+	u64	dispatcher_pass                  : 1; /* Word 0 Bit 49 */
+	u64	ucp_on_for_rts                   : 1; /* Word 0 Bit 50 */
+	u64	exc_hdri                         : 1; /* Word 0 Bit 51 */
+	u64	pkt_parser_bypass                : 1; /* Word 0 Bit 52 */
+	u64	exc_pipe                         : 1; /* Word 0 Bit 53 */
+	u64	nat_in_hdrs                      : 1; /* Word 0 Bit 54 */
+	u64	pkt_has_padding                  : 1; /* Word 0 Bit 55 */
+	u64	rx_flags                         : 8; /* Word 0 Bits 56-63 */
+	u64	rx_packet_length                 : 16; /* Word 1 Bits 0-15 */
+	u64	revised_packet_length            : 16; /* Word 1 Bits 16-31 */
+	u64	frag_en                          : 1; /* Word 1 Bit 32 */ 
+	u64	frag_bypass                      : 1; /* Word 1 Bit 33 */
+	u64	frag_process                     : 1; /* Word 1 Bit 34 */
+	u64	tx_pkt_transferred               : 1; /* Word 1 Bit 35 */
+	u64	filter_aggr_force_close          : 1; /* Word 1 Bit 36 */
+	u64	router_aggr_force_close          : 1; /* Word 1 Bit 37 */
+	u64	not_used1                        : 2; /* Word 1 Bits 38-39 */
+	u64	src_id                           : 8; /* Word 1 Bits 40-47 */
+	u64	src_pipe                         : 8; /* Word 1 Bits 48-55 */
+	u64	dest_pipe                        : 8; /* Word 1 Bits 56-63 */
+	u64	ihl_offset                       : 6; /* Word 2 Bits 0-5 */
+	u64	d_dcph_pass                      : 1; /* Word 2 Bit 6 */
+	u64	not_used2                        : 1; /* Word 2 Bit 7 */
+	u64	protocol                         : 8; /* Word 2 Bits 8-15 */
+	u64	tos                              : 8; /* Word 2 Bits 16-23 */
+	u64	id                               : 16; /* Word 2 Bits 24-39 */
+	u64	v6_reserved                      : 4; /* Word 2 Bits 40-43 */
+	u64	ff                               : 1; /* Word 2 Bit 44 */
+	u64	mf                               : 1; /* Word 2 Bit 45 */
+	u64	pkt_is_frag                      : 1; /* Word 2 Bit 46 */
+	u64	cs_disavle_trailer_valid_bit     : 1; /* Word 2 Bit 47 */
+	u64	exc_checksum                     : 1; /* Word 2 Bit 48 */
+	u64	trnseq_0                         : 3; /* Word 2 Bits 49-51 */
+	u64	trnseq_1                         : 3; /* Word 2 Bits 52-54 */
+	u64	trnseq_2                         : 3; /* Word 2 Bits 55-57 */
+	u64	trnseq_3                         : 3; /* Word 2 Bits 58-60 */
+	u64	trnseq_4                         : 3; /* Word 2 Bits 61-63 */
+	u64	trnseq_ex_length                 : 8; /* Word 3 Bits 0-7 */
+	u64	trnseq_4_length                  : 8; /* Word 3 Bits 8-15 */
+	u64	trnseq_4_offset                  : 8; /* Word 3 Bits 16-23 */
+	u64	dps_tx_pop_cnt                   : 2; /* Word 3 Bits 24-25 */
+	u64	dps_tx_push_cnt                  : 2; /* Word 3 Bits 26-27 */
+	u64	vol_ic_dcph_cfg                  : 1; /* Word 3 Bit 28 */
+	u64	vol_ic_tag_stts                  : 1; /* Word 3 Bit 29 */
+	u64	vol_ic_pxkt_init_ex              : 1; /* Word 3 Bit 30 */
+	u64	vol_ic_pkt_init                  : 1; /* Word 3 Bit 31 */
+	u64	trnseq_0_preucp                  : 1; /* Word 3 Bit 32 */
+	u64	dest_pipe_overridden_ucp         : 1; /* Word 3 Bit 33 */
+	u64	force_to_default                 : 1; /* Word 3 Bit 34 */
+	u64	close_vp_before                  : 1; /* Word 3 Bit 35 */
+	u64	vol_ic_eob_bubble                : 1; /* Word 3 Bit 36 */
+	u64	not_used3                        : 5; /* Word 3 Bits 37-41 */
+	u64	maci_bytes_in_trnseq             : 1; /* Word 3 Bit 42 */
+	u64	drop_drbip                       : 1; /* Word 3 Bit 43 */
+	u64	exc_drbip                        : 1; /* Word 3 Bit 44 */
+	u64	drbip_valid                      : 1; /* Word 3 Bit 45 */
+	u64	tx_pkt_suspended                 : 1; /* Word 3 Bit 46 */
+	u64	rb                               : 1; /* Word 3 Bit 47 */
+	u64	tcp_win_size                     : 16; /* Word 3 Bits 48-63 */
+	u64	trnseq_0_length                  : 8; /* Word 4 Bits 0-7 */
+	u64	trnseq_0_offset                  : 8; /* Word 4 Bits 8-15 */
+	u64	trnseq_1_length                  : 8; /* Word 4 Bits 16-23 */
+	u64	trnseq_1_offset                  : 8; /* Word 4 Bits 24-31 */
+	u64	trnseq_2_length                  : 8; /* Word 4 Bits 32-39 */
+	u64	trnseq_2_offset                  : 8; /* Word 4 Bits 40-47 */
+	u64	trnseq_3_length                  : 8; /* Word 4 Bits 48-55 */
+	u64	trnseq_3_offset                  : 8; /* Word 4 Bits 56-63 */
+	u64	dmar_valid_length                : 16; /* Word 5 Bits 0-15 */
+	u64	dcph_valid_length                : 16; /* Word 5 Bits 16-31 */
+	u64	frag_pipe                        : 8; /* Word 5 Bits 32-39 */
+	u64	notif_pipe                       : 8; /* Word 5 Bits 40-47 */
+	u64	not_used4                        : 8; /* Word 5 Bits 48-55 */
+	u64	vp_index                         : 8; /* Word 5 Bits 56-63 */
+	u64	l4_payload_checksum              : 16; /* Word 6 Bits 0-15 */
+	u64	l4_pseudo_hdr_checksum           : 16; /* Word 6 Bits 16-31 */
+	u64	frag_hdr_offset                  : 9; /* Word 6 Bits 32-40 */
+	u64	not_used5                        : 1; /* Word 6 Bit 41 */
+	u64	ece                              : 1; /* Word 6 Bit 42 */
+	u64	udp_with_zero_checksum           : 1; /* Word 6 Bit 43 */
+	u64	router_rule_table_hit            : 1; /* Word 6 Bit 44 */
+	u64	filter_rule_table_hit            : 1; /* Word 6 Bit 45 */
+	u64	hps_round_cnt                    : 2; /* Word 6 Bits 46-47 */
+	u64	first_pkt_parser_done            : 1; /* Word 6 Bit 48 */
+	u64	frag_hit_2nd                     : 1; /* Word 6 Bit 49 */
+	u64	frag_rule                        : 4; /* Word 6 Bits 50-53 */
+	u64	frag_table                       : 1; /* Word 6 Bit 54 */
+	u64	frag_hit                         : 1; /* Word 6 Bit 55 */
+	u64	data_cmdq_ptr                    : 8; /* Word 6 Bits 56-63 */
+	u64	filter_result                    : 6; /* Word 7 Bits 0-5 */
+	u64	nat_result                       : 6; /* Word 7 Bits 6-11 */
+	u64	tx_pkt_dropped                   : 1; /* Word 7 Bit 12 */
+	u64	not_used6                        : 2; /* Word 7 Bits 13-14 */
+	u64	original_hdr_size                : 9; /* Word 7 Bits 15-23 */
+	u64	frag_dest_pipe                   : 8; /* Word 7 Bits 24-31 */
+	u64	filter_action_params             : 5; /* Word 7 Bits 32-36 */
+	u64	pure_ack                         : 1; /* Word 7 Bit 37 */
+	u64	syn                              : 1; /* Word 7 Bit 38 */
+	u64	fin                              : 1; /* Word 7 Bit 39 */
+	u64	ipv4_vld_checksum                : 1; /* Word 7 Bit 40 */
+	u64	metadata_type                    : 3; /* Word 7 Bits 41-43 */
+	u64	qmap_cs_valid_bit                : 1; /* Word 7 Bit 44 */
+	u64	df                               : 1; /* Word 7 Bit 45 */
+	u64	ttl                              : 8; /* Word 7 Bits 46-53 */
+	u64	original_ip_version              : 2; /* Word 7 Bits 54-55 */
+	u64	original_src_hdr_len             : 8; /* Word 7 Bits 56-63 */
+	u64	fl_l                             : 1; /* Word 8 Bit 0 */
+	u64	fl_h                             : 1; /* Word 8 Bit 1 */
+	u64	fr_g                             : 1; /* Word 8 Bit 2 */
+	u64	fr_ret                           : 1; /* Word 8 Bit 3 */
+	u64	fr_rule_id                       : 10; /* Word 8 Bits 4-13 */
+	u64	rt_l                             : 1; /* Word 8 Bit 14 */
+	u64	rt_h                             : 1; /* Word 8 Bit 15 */
+	u64	hdri_payload_length_includes_padding : 1; /* Word 8 Bit 16 */
+	u64	hdri_pdding_or_total_length      : 1; /* Word 8 Bit 17 */
+	u64	hdri_payload_len_valid           : 1; /* Word 8 Bit 18 */
+	u64	hdri_padding_valid               : 1; /* Word 8 Bit 19 */
+	u64	hdri_endianess                   : 1; /* Word 8 Bit 20 */
+	u64	rt_match                         : 1; /* Word 8 Bit 21 */
+	u64	rt_rule_id                       : 10; /* Word 8 Bits 22-31 */
+	u64	nat_tbl_index                    : 13; /* Word 8 Bits 32-42 */
+	u64	nat_type                         : 2; /* Word 8 Bits 43-44 */
+	u64	hdr_l                            : 1; /* Word 8 Bit 45 */
+	u64	header_offset                    : 10; /* Word 8 Bits 48-57 */
+	u64	filter_process                   : 1; /* Word 8 Bit 58 */
+	u64	filter_result_valid              : 1; /* Word 8 Bit 59 */
+	u64	nat_result_valid                 : 1; /* Word 8 Bit 60 */
+	u64	nat_process                      : 1; /* Word 8 Bit 61 */
+	u64	urg                              : 1; /* Word 8 Bit 62 */
+	u64	cwr                              : 1; /* Word 8 Bit 63 */
+	u64	push                             : 1; /* Word 9 Bit 0 */
+	u64	rst                              : 1; /* Word 9 Bit 1 */
+	u64	ip_checksum_fix                  : 1; /* Word 9 Bit 2 */
+	u64	tport_checksum_fix               : 1; /* Word 9 Bit 3 */
+	u64	ack                              : 1; /* Word 9 Bit 4 */
+	u64	tcp_data_offset                  : 4; /* Word 9 Bits 5-8 */
+	u64	router_process                   : 1; /* Word 9 Bit 9 */
+	u64	frag_router_aggr_fc              : 1; /* Word 9 Bit 10 */
+	u64	frag_exception                   : 1; /* Word 9 Bit 11 */
+	u64	bearer_context_index             : 2; /* Word 9 Bits 12-13 */
+	u64	dcph_cfg_size                    : 16; /* Word 9 Bits 14-29 */
+	u64	bearer_cfg_count                 : 32; /* Word 9 Bits 30-61 */
+	u64	maci_size                        : 2; /* Word 9 Bits 62-63 */
+	u64	tag_info                         : 48; /* Word 10 Bits 0-47 */
+	u64	ucp_cmd_id                       : 16; /* Word 10 Bits 48-63 */
+	u64	metadata                         : 32; /* Word 11 Bits 0-31 */
+	u64	ucp_cmd_params                   : 32; /* Word 11 Bits 32-63 */
+	u64	frag_nat_ip_address              : 32; /* Word 12 Bits 0-31 */
+	u64	frag_nat_ip_cs_diff              : 16; /* Word 12 Bits 32-47 */
+	u64	ulso_ipv4_id_mode                : 2; /* Word 12 Bits 48-49 */
+	u64	ulso_udp_checksum_zero           : 1; /* Word 12 Bit 50 */
+	u64	ulso_frame_valid                 : 1; /* Word 12 Bit 51 */
+	u64	not_used7                        : 1; /* Word 12 Bit 52 */
+	u64	frag_nat_type                    : 2; /* Word 12 Bits 53-54 */
+	u64	fragr_fr_ret                     : 1; /* Word 12 Bit 55 */
+	u64	frag_protocol                    : 8; /* Word 12 Bits 56-63 */
+	u64	frag_src_ip_address              : 32; /* Word 13 Bits 0-31 */
+	u64	frag_dest_ip_address             : 32; /* Word 13 Bits 32-63 */
+	u64	router_stats_index               : 8; /* Word 14 Bits 0-7 */
+	u64	filter_stats_index               : 8; /* Word 14 Bits 8-15 */
+	u64	frag_filter_aggr_fc              : 1; /* Word 14 Bit 16 */
+	u64	close_deafault                   : 1; /* Word 14 Bit 17 */
+	u64	close_vp_after_value             : 8; /* Word 14 Bits 18-25 */
+	u64	close_vp_before_value            : 8; /* Word 14 Bits 26-33 */
+	u64	close_vp_after                   : 1; /* Word 14 Bit 34 */
+	u64	inc_ipv4_id                      : 1; /* Word 14 Bit 35 */
+	u64	open_vp                          : 1; /* Word 14 Bit 36 */
+	u64	frag_hdr_l                       : 1; /* Word 14 Bit 37*/
+	u64	frag_header_offset               : 10; /* Word 14 Bits 38-47 */
+	u64	frag_id                          : 16; /* Word 14 Bits 48-63 */
+	u64	metadata_pre_nat                 : 32; /* Word 15 Bits 0-31 */
+	u64	ipv4_cs_without_total_len        : 16; /* Word 15 Bits 32-47 */
+	u64	frag_router_stats_index          : 8; /* Word 15 Bits 48-55 */
+	u64	frag_filter_stats_index          : 8; /* Word 15 Bits 56-63 */
+	u64	bearer_id                        : 8; /* Word 16 Bits 0-7 */
+	u64	rt_table_index                   : 8; /* Word 16 Bits 8-15 */
+	u64	hdri_offset_padding_total_length : 8; /* Word 16 Bits 16-23 */
+	u64	hdri_offset_payload_len          : 8; /* Word 16 Bits 24-31 */
+	u64	hdri_dst_len                     : 8; /* Word 16 Bits 32-39 */
+	u64	hdri_additional_const_length     : 8; /* Word 16 Bits 40-47 */
+	u64	ulso_mss                         : 16; /* Word 16 Bits 48-63 */
+	u64	maci_calculated_lsbs             : 64; /* Word 17 Bits 0-63 */
+	u64	maci_calculated_msbs             : 64; /* Word 18 Bits 0-63 */
+	u64	padding_bytes_cnt                : 16; /* Word 19 Bits 0-16 */
 } __packed;
 
 #endif /* #if !defined(_IPA_PKT_CNTXT_H_) */
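
The updated packet-context dump header assigns explicit values to the state enum (adding MULTI_DRBIP, COAL_MASTER, D_DCPH_2 and TX_RSRCREL) and annotates every bit-field with its word and bit position. A hypothetical decode helper for dump parsing, grounded only in the enum names above (it is not part of this header):

static const char *ipa_pkt_cntxt_state_str(u8 state)
{
	switch (state) {
	case IPA_HW_PKT_CNTXT_STATE_HFETCHER_INIT: return "HFETCHER_INIT";
	case IPA_HW_PKT_CNTXT_STATE_MULTI_DRBIP:   return "MULTI_DRBIP";
	case IPA_HW_PKT_CNTXT_STATE_COAL_MASTER:   return "COAL_MASTER";
	case IPA_HW_PKT_CNTXT_STATE_TX_RSRCREL:    return "TX_RSRCREL";
	/* remaining states elided */
	default:                                   return "UNKNOWN";
	}
}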

+ 199 - 66
drivers/platform/msm/ipa/ipa_v3/ipa.c

@@ -56,7 +56,7 @@
 #endif
 
 #define DRV_NAME "ipa"
-
+#define DELAY_BEFORE_FW_LOAD 500
 #define IPA_SUBSYSTEM_NAME "ipa_fws"
 #define IPA_UC_SUBSYSTEM_NAME "ipa_uc"
 
@@ -141,6 +141,7 @@ static void ipa3_free_pkt_init_ex(void);
 
 static void ipa3_load_ipa_fw(struct work_struct *work);
 static DECLARE_WORK(ipa3_fw_loading_work, ipa3_load_ipa_fw);
+static DECLARE_DELAYED_WORK(ipa3_fw_load_failure_handle, ipa3_load_ipa_fw);
 
 static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
 static DECLARE_DELAYED_WORK(ipa_dec_clients_disable_clks_on_wq_work,
@@ -637,27 +638,6 @@ static int ipa3_clean_mhip_dl_rule(void)
 	return 0;
 }
 
-static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
-		unsigned long event, void *ptr)
-{
-	if (ipa3_ctx != NULL)
-	{
-		if (ipa3_ctx->is_device_crashed)
-			return NOTIFY_DONE;
-		ipa3_ctx->is_device_crashed = true;
-	}
-
-	ipa3_active_clients_log_print_table(active_clients_table_buf,
-			IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
-	IPAERR("%s\n", active_clients_table_buf);
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block ipa3_active_clients_panic_blk = {
-	.notifier_call  = ipa3_active_clients_panic_notifier,
-};
-
 #ifdef CONFIG_IPA_DEBUG
 static int ipa3_active_clients_log_insert(const char *string)
 {
@@ -709,9 +689,6 @@ static int ipa3_active_clients_log_init(void)
 	ipa3_ctx->ipa3_active_clients_logging.log_tail =
 			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
 	hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
-	/* 2nd ipa3_active_clients_panic_notifier */
-	atomic_notifier_chain_register(&panic_notifier_list,
-			&ipa3_active_clients_panic_blk);
 	ipa3_ctx->ipa3_active_clients_logging.log_rdy = true;
 
 	return 0;
@@ -2372,16 +2349,7 @@ static int proc_sram_info_rqst(
 	return 0;
 }
 
-static void ipa3_mac_flt_list_free_cb(void *buff, u32 len, u32 type)
-{
-	if (!buff) {
-		IPAERR("Null buffer\n");
-		return;
-	}
-	kfree(buff);
-}
-
-static void ipa3_pkt_threshold_free_cb(void *buff, u32 len, u32 type)
+static void ipa3_general_free_cb(void *buff, u32 len, u32 type)
 {
 	if (!buff) {
 		IPAERR("Null buffer\n");
@@ -2415,7 +2383,7 @@ static int ipa3_send_mac_flt_list(unsigned long usr_param)
 		((struct ipa_ioc_mac_client_list_type *)buff)->flt_state);
 
 	retval = ipa3_send_msg(&msg_meta, buff,
-		ipa3_mac_flt_list_free_cb);
+		ipa3_general_free_cb);
 	if (retval) {
 		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
 		retval,
@@ -2479,7 +2447,7 @@ static int ipa3_send_pkt_threshold(unsigned long usr_param)
 		((struct ipa_set_pkt_threshold *)buff2)->pkt_threshold);
 
 	retval = ipa3_send_msg(&msg_meta, buff2,
-		ipa3_pkt_threshold_free_cb);
+		ipa3_general_free_cb);
 	if (retval) {
 		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
 		retval,
@@ -2543,7 +2511,7 @@ static int ipa3_send_sw_flt_list(unsigned long usr_param)
 		((struct ipa_sw_flt_list_type *)buff)->iface_enable);
 
 	retval = ipa3_send_msg(&msg_meta, buff,
-		ipa3_mac_flt_list_free_cb);
+		ipa3_general_free_cb);
 	if (retval) {
 		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
 		retval,
@@ -2602,7 +2570,7 @@ static int ipa3_send_ippt_sw_flt_list(unsigned long usr_param)
 		((struct ipa_ippt_sw_flt_list_type *)buff)->port_enable);
 
 	retval = ipa3_send_msg(&msg_meta, buff,
-		ipa3_mac_flt_list_free_cb);
+		ipa3_general_free_cb);
 	if (retval) {
 		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
 		retval,
@@ -2613,6 +2581,46 @@ static int ipa3_send_ippt_sw_flt_list(unsigned long usr_param)
 	return 0;
 }
 
+/**
+ * ipa3_send_macsec_info() - Pass macsec mapping to the IPACM
+ * @event_type: Type of the event - UP or DOWN
+ * @map: pointer to macsec to eth mapping structure
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_send_macsec_info(enum ipa_macsec_event event_type, struct ipa_macsec_map *map)
+{
+	struct ipa_msg_meta msg_meta;
+	int res = 0;
+
+	if (!map) {
+		IPAERR("Bad arg: info is NULL\n");
+		res = -EIO;
+		goto done;
+	}
+
+	/*
+	 * Prep and send msg to ipacm
+	 */
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = event_type;
+	msg_meta.msg_len  = sizeof(struct ipa_macsec_map);
+
+	/*
+	 * Post event to ipacm
+	 */
+	res = ipa3_send_msg(&msg_meta, map, ipa3_general_free_cb);
+
+	if (res) {
+		IPAERR_RL("ipa3_send_msg failed: %d\n", res);
+		kfree(map);
+		goto done;
+	}
+
+done:
+	return res;
+}
+
 static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	int retval = 0;
@@ -2631,6 +2639,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	struct ipa_ioc_get_vlan_mode vlan_mode;
 	struct ipa_ioc_wigig_fst_switch fst_switch;
 	struct ipa_ioc_eogre_info eogre_info;
+	struct ipa_ioc_macsec_info macsec_info;
+	struct ipa_macsec_map *macsec_map;
 	bool send2uC, send2ipacm;
 	size_t sz;
 	int pre_entry;
@@ -4013,6 +4023,47 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 			IPAERR("ipa_flt_sram_set_client_prio_high failed! retval=%d\n", retval);
 		break;
 #endif
+
+	case IPA_IOC_ADD_MACSEC_MAPPING:
+	case IPA_IOC_DEL_MACSEC_MAPPING:
+		IPADBG("Got %s\n", cmd == IPA_IOC_ADD_MACSEC_MAPPING ?
+			"IPA_IOC_ADD_MACSEC_MAPPING" : "IPA_IOC_DEL_MACSEC_MAPPING");
+		if (copy_from_user(&macsec_info, (const void __user *) arg,
+			sizeof(struct ipa_ioc_macsec_info))) {
+			IPAERR_RL("copy_from_user for ipa_ioc_macsec_info fails\n");
+			retval = -EFAULT;
+			break;
+		}
+
+		/* Validate the input */
+		if (macsec_info.ioctl_data_size != sizeof(struct ipa_macsec_map)) {
+			IPAERR_RL("data size missmatch\n");
+			retval = -EFAULT;
+			break;
+		}
+
+		macsec_map = kzalloc(sizeof(struct ipa_macsec_map), GFP_KERNEL);
+		if (!macsec_map) {
+			IPAERR("macsec_map memory allocation failed !\n");
+			retval = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(macsec_map, (const void __user *)(macsec_info.ioctl_ptr),
+			sizeof(struct ipa_macsec_map))) {
+			IPAERR_RL("copy_from_user for ipa_macsec_map fails\n");
+			retval = -EFAULT;
+			kfree(macsec_map);
+			break;
+		}
+
+		/* Send message to the IPACM */
+		ipa3_send_macsec_info(
+			(cmd == IPA_IOC_ADD_MACSEC_MAPPING) ?
+			IPA_MACSEC_ADD_EVENT : IPA_MACSEC_DEL_EVENT,
+			macsec_map);
+		break;
+
 	default:
 		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 		return -ENOTTY;
@@ -4963,8 +5014,7 @@ static int ipa3_q6_set_ex_path_to_apps(void)
 	}
 
 	/* Will wait 500msecs for IPA tag process completion */
-	retval = ipa3_tag_process(desc, num_descs,
-		msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
+	retval = ipa3_tag_process(desc, num_descs, CLEANUP_TAG_PROCESS_TIMEOUT);
 	if (retval) {
 		IPAERR("TAG process failed! (error %d)\n", retval);
 		/* For timeout error ipa3_destroy_imm cb will destroy user1 */
@@ -6889,7 +6939,8 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
 			pipe_bitmask |= bmsk;
 		bmsk = bmsk << 1;
 
-		if ((i % IPA_EP_PER_REG) == (ep_per_reg - 1)) {
+		if ((i % IPA_EP_PER_REG) == (ep_per_reg - 1)
+			|| (i == ipa3_ctx->ipa_num_pipes - 1)) {
 			IPADBG("interrupt data: %u\n", suspend_data[j]);
 			res = ipa_pm_handle_suspend(pipe_bitmask, j);
 			if (res) {
@@ -7074,6 +7125,7 @@ static int ipa3_panic_notifier(struct notifier_block *this,
 	{
 		if (ipa3_ctx->is_device_crashed)
 			return NOTIFY_DONE;
+		ipa3_ctx->is_device_crashed = true;
 	}
 
 	ipa3_freeze_clock_vote_and_notify_modem();
@@ -7094,6 +7146,10 @@ static int ipa3_panic_notifier(struct notifier_block *this,
 		ipa_wigig_save_regs();
 	}
 
+	ipa3_active_clients_log_print_table(active_clients_table_buf,
+			IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
+	IPAERR("%s\n", active_clients_table_buf);
+
 	return NOTIFY_DONE;
 }
 
@@ -7288,6 +7344,8 @@ static inline void ipa3_register_to_fmwk(void)
 		ipa3_register_notifier;
 	data.ipa_unregister_notifier =
 		ipa3_unregister_notifier;
+	data.ipa_add_socksv5_conn = ipa3_add_socksv5_conn;
+	data.ipa_del_socksv5_conn = ipa3_del_socksv5_conn;
 
 	if (ipa_fmwk_register_ipa(&data)) {
 		IPAERR("couldn't register to IPA framework\n");
@@ -7713,6 +7771,10 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 		IPADBG("register to fmwk\n");
 		ipa3_register_to_fmwk();
 	}
+
+	/* init uc-activation tbl*/
+	ipa3_setup_uc_act_tbl();
+
 	complete_all(&ipa3_ctx->init_completion_obj);
 
 	ipa_ut_module_init();
@@ -7933,11 +7995,14 @@ static void ipa3_load_ipa_fw(struct work_struct *work)
 	IPADBG("Entry\n");
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-
+	
 	result = ipa3_attach_to_smmu();
 	if (result) {
 		IPAERR("IPA attach to smmu failed %d\n", result);
 		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
+			&ipa3_fw_load_failure_handle,
+			msecs_to_jiffies(DELAY_BEFORE_FW_LOAD));
 		return;
 	}
 
@@ -7965,13 +8030,18 @@ static void ipa3_load_ipa_fw(struct work_struct *work)
 		result = ipa3_manual_load_ipa_fws();
 	}
 
-	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 
 	if (result) {
-		IPAERR("IPA FW loading process has failed result=%d\n",
-			result);
+
+		ipa3_ctx->ipa_pil_load++;
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		IPADBG("IPA firmware loading deffered to a work queue\n");
+		queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
+			&ipa3_fw_load_failure_handle,
+			msecs_to_jiffies(DELAY_BEFORE_FW_LOAD));
 		return;
 	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	mutex_lock(&ipa3_ctx->fw_load_data.lock);
 	ipa3_ctx->fw_load_data.state = IPA_FW_LOAD_STATE_LOADED;
 	mutex_unlock(&ipa3_ctx->fw_load_data.lock);
@@ -8040,7 +8110,7 @@ static void ipa_fw_load_sm_handle_event(enum ipa_fw_load_event ev)
 		if (ipa3_ctx->fw_load_data.state == IPA_FW_LOAD_STATE_INIT) {
 			ipa3_ctx->fw_load_data.state =
 				IPA_FW_LOAD_STATE_SMMU_DONE;
-			goto out;
+			goto sched_fw_load;
 		}
 		if (ipa3_ctx->fw_load_data.state ==
 			IPA_FW_LOAD_STATE_FWFILE_READY) {
@@ -8648,6 +8718,9 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	ipa3_ctx->mpm_ring_size_dl = DEFAULT_MPM_RING_SIZE_DL;
 	ipa3_ctx->mpm_teth_aggr_size = DEFAULT_MPM_TETH_AGGR_SIZE;
 	ipa3_ctx->mpm_uc_thresh = DEFAULT_MPM_UC_THRESH_SIZE;
+	ipa3_ctx->uc_act_tbl_valid = false;
+	ipa3_ctx->uc_act_tbl_total = 0;
+	ipa3_ctx->uc_act_tbl_next_index = 0;
 
 	if (resource_p->gsi_fw_file_name) {
 		ipa3_ctx->gsi_fw_file_name =
@@ -8875,7 +8948,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	if (!ipa3_ctx->power_mgmt_wq) {
 		IPAERR("failed to create power mgmt wq\n");
 		result = -ENOMEM;
-		goto fail_init_hw;
+		goto fail_gsi_map;
 	}
 
 	ipa3_ctx->transport_power_mgmt_wq =
@@ -9009,6 +9082,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 
 	mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex);
 	mutex_init(&ipa3_ctx->ipa_cne_evt_lock);
+	mutex_init(&ipa3_ctx->act_tbl_lock);
 
 	idr_init(&ipa3_ctx->ipa_idr);
 	spin_lock_init(&ipa3_ctx->idr_lock);
@@ -9155,6 +9229,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 
 	mutex_init(&ipa3_ctx->app_clock_vote.mutex);
 	ipa3_ctx->is_modem_up = false;
+	ipa3_ctx->mhi_ctrl_state = IPA_MHI_CTRL_NOT_SETUP;
 
 	return 0;
 
@@ -9210,8 +9285,6 @@ fail_flt_rule_cache:
 	destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
 fail_create_transport_wq:
 	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
-fail_init_hw:
-	gsi_unmap_base();
 fail_gsi_map:
 	if (ipa3_ctx->reg_collection_base)
 		iounmap(ipa3_ctx->reg_collection_base);
@@ -9219,13 +9292,17 @@ fail_gsi_map:
 fail_remap:
 	ipa3_disable_clks();
 	ipa3_active_clients_log_destroy();
+	gsi_unmap_base();
 fail_init_active_client:
 	if (ipa3_clk)
 		clk_put(ipa3_clk);
 	ipa3_clk = NULL;
 fail_bus_reg:
 	for (i = 0; i < ipa3_ctx->icc_num_paths; i++)
-		if (ipa3_ctx->ctrl->icc_path[i]) {
+		if (IS_ERR_OR_NULL(ipa3_ctx->ctrl->icc_path[i])) {
+			ipa3_ctx->ctrl->icc_path[i] = NULL;
+			break;
+		} else {
 			icc_put(ipa3_ctx->ctrl->icc_path[i]);
 			ipa3_ctx->ctrl->icc_path[i] = NULL;
 		}
@@ -9237,8 +9314,10 @@ fail_mem_ctrl:
 	kfree(ipa3_ctx->ipa_tz_unlock_reg);
 	ipa3_ctx->ipa_tz_unlock_reg = NULL;
 fail_tz_unlock_reg:
-	if (ipa3_ctx->logbuf)
+	if (ipa3_ctx->logbuf) {
 		ipc_log_context_destroy(ipa3_ctx->logbuf);
+		ipa3_ctx->logbuf = NULL;
+	}
 fail_uc_file_alloc:
 	kfree(ipa3_ctx->gsi_fw_file_name);
 	ipa3_ctx->gsi_fw_file_name = NULL;
@@ -10281,6 +10360,7 @@ static int ipa_smmu_perph_cb_probe(struct device *dev,
 		}
 	}
 
+	cb->done = true;
 	return 0;
 }
 
@@ -10374,10 +10454,35 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
 	ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = (bypass != 0);
 
 	ipa3_ctx->uc_pdev = dev;
-
+	cb->done = true;
 	return 0;
 }
 
+static void ipa3_ap_iommu_unmap(struct ipa_smmu_cb_ctx *cb, const u32 *add_map, u32 add_map_size) {
+
+	int i, res;
+
+	/* iterate of each entry of the additional mapping array */
+	for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+		u32 iova = be32_to_cpu(add_map[i]);
+		u32 pa = be32_to_cpu(add_map[i + 1]);
+		u32 size = be32_to_cpu(add_map[i + 2]);
+		unsigned long iova_p;
+		phys_addr_t pa_p;
+		u32 size_p;
+
+		IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+			iova_p, pa_p, size_p);
+		IPADBG_LOW("unmapping 0x%lx to 0x%pa size %d\n",
+				iova_p, &pa_p, size_p);
+
+		res = iommu_unmap(cb->iommu_domain,iova_p, size_p);
+		if(res != size_p) {
+			pr_err("iommu unmap failed for AP cb\n");
+			ipa_assert();
+		}
+	}
+}
 static int ipa_smmu_ap_cb_probe(struct device *dev)
 {
 	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
@@ -10526,6 +10631,8 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
 		if (ret < 0 && ret != -EEXIST) {
 			IPAERR("unable to allocate smem MODEM entry\n");
 			cb->valid = false;
+			if(add_map)
+				ipa3_ap_iommu_unmap(cb, add_map, add_map_size);
 			return -EFAULT;
 		}
 		smem_addr = qcom_smem_get(SMEM_MODEM,
@@ -10534,6 +10641,8 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
 		if (IS_ERR(smem_addr)) {
 			IPAERR("unable to acquire smem MODEM entry\n");
 			cb->valid = false;
+			if(add_map)
+				ipa3_ap_iommu_unmap(cb, add_map, add_map_size);
 			return -EFAULT;
 		}
 		if (smem_size != ipa_smem_size)
@@ -10554,6 +10663,7 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
 
 	smmu_info.present[IPA_SMMU_CB_AP] = true;
 
+	cb->done = true;
 	ipa3_ctx->pdev = dev;
 	cb->next_addr = cb->va_end;
 
@@ -10618,14 +10728,21 @@ static int ipa_smmu_11ad_cb_probe(struct device *dev)
 		IPADBG("11AD using shared CB\n");
 		cb->shared = true;
 	}
-
+	cb->done = true;
 	return 0;
 }
 
 static int ipa_smmu_cb_probe(struct device *dev, enum ipa_smmu_cb_type cb_type)
 {
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(cb_type);
+
+	if((cb != NULL) && (cb->done == true)) {
+		IPADBG("SMMU CB type %d already initialized\n", cb_type);
+		return 0;
+	}
 	switch (cb_type) {
 	case IPA_SMMU_CB_AP:
+		ipa3_ctx->pdev = &ipa3_ctx->master_pdev->dev;
 		return ipa_smmu_ap_cb_probe(dev);
 	case IPA_SMMU_CB_WLAN:
 	case IPA_SMMU_CB_WLAN1:
@@ -10633,6 +10750,7 @@ static int ipa_smmu_cb_probe(struct device *dev, enum ipa_smmu_cb_type cb_type)
 	case IPA_SMMU_CB_ETH1:
 		return ipa_smmu_perph_cb_probe(dev, cb_type);
 	case IPA_SMMU_CB_UC:
+		ipa3_ctx->uc_pdev = &ipa3_ctx->master_pdev->dev;
 		return ipa_smmu_uc_cb_probe(dev);
 	case IPA_SMMU_CB_11AD:
 		return ipa_smmu_11ad_cb_probe(dev);
@@ -10647,18 +10765,19 @@ static int ipa3_attach_to_smmu(void)
 	struct ipa_smmu_cb_ctx *cb;
 	int i, result;
 
-	ipa3_ctx->pdev = &ipa3_ctx->master_pdev->dev;
-	ipa3_ctx->uc_pdev = &ipa3_ctx->master_pdev->dev;
-
 	if (smmu_info.arm_smmu) {
 		IPADBG("smmu is enabled\n");
 		for (i = 0; i < IPA_SMMU_CB_MAX; i++) {
 			cb = ipa3_get_smmu_ctx(i);
 			result = ipa_smmu_cb_probe(cb->dev, i);
-			if (result)
+			if (result) {
 				IPAERR("probe failed for cb %d\n", i);
+				return result;
+			}
 		}
 	} else {
+		ipa3_ctx->pdev = &ipa3_ctx->master_pdev->dev;
+		ipa3_ctx->uc_pdev = &ipa3_ctx->master_pdev->dev;
 		IPADBG("smmu is disabled\n");
 	}
 	return 0;
@@ -10739,7 +10858,6 @@ static int ipa_smmu_update_fw_loader(void)
 			ipa3_ctx->num_smmu_cb_probed ==
 			ipa3_ctx->max_num_smmu_cb) {
 			IPADBG("All %d CBs probed\n", IPA_SMMU_CB_MAX);
-			ipa_fw_load_sm_handle_event(IPA_FW_LOAD_EVNT_SMMU_DONE);
 
 			if (ipa3_ctx->use_xbl_boot) {
 				IPAERR("Using XBL boot load for IPA FW\n");
@@ -10759,6 +10877,9 @@ static int ipa_smmu_update_fw_loader(void)
 					IPAERR("IPA post init failed %d\n", result);
 					return result;
 				}
+			} else {
+
+				ipa_fw_load_sm_handle_event(IPA_FW_LOAD_EVNT_SMMU_DONE);
 			}
 		}
 	} else {
@@ -10927,27 +11048,38 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p)
 
 	if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
 		if (of_property_read_bool(pdev_p->dev.of_node,
-			"qcom,use-64-bit-dma-mask"))
+			"qcom,use-64-bit-dma-mask")) {
 			smmu_info.use_64_bit_dma_mask = true;
+			if (dma_set_mask_and_coherent(&pdev_p->dev, DMA_BIT_MASK(64))) {
+				IPAERR("DMA set 64bit mask failed\n");
+				return -EOPNOTSUPP;
+			}
+		}
 		smmu_info.arm_smmu = true;
 	} else {
 		if (of_property_read_bool(pdev_p->dev.of_node,
 			"qcom,use-64-bit-dma-mask")) {
-			if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) ||
-			    dma_set_coherent_mask(&pdev_p->dev,
-			    DMA_BIT_MASK(64))) {
+			if (dma_set_mask_and_coherent(&pdev_p->dev, DMA_BIT_MASK(64))) {
 				IPAERR("DMA set 64bit mask failed\n");
 				return -EOPNOTSUPP;
 			}
 		} else {
-			if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
-			    dma_set_coherent_mask(&pdev_p->dev,
-			    DMA_BIT_MASK(32))) {
+			if (dma_set_mask_and_coherent(&pdev_p->dev, DMA_BIT_MASK(32))) {
 				IPAERR("DMA set 32bit mask failed\n");
 				return -EOPNOTSUPP;
 			}
 		}
+		/* Below update of pre init for non smmu device, As
+		 * existing flow initialzies only for smmu
+		 * enabled node.*/
+
+		result = ipa3_pre_init(&ipa3_res, pdev_p);
+		if (result) {
+			IPAERR("ipa3_init failed\n");
+			return result;
+		}
 		ipa_fw_load_sm_handle_event(IPA_FW_LOAD_EVNT_SMMU_DONE);
+		goto skip_repeat_pre_init;
 	}
 
 	/* Proceed to real initialization */
@@ -10957,6 +11089,7 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p)
 		return result;
 	}
 
+skip_repeat_pre_init:
 	result = of_platform_populate(pdev_p->dev.of_node,
 		ipa_plat_drv_match, NULL, &pdev_p->dev);
 	if (result) {
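
Among the ipa.c changes, the new IPA_IOC_ADD_MACSEC_MAPPING / IPA_IOC_DEL_MACSEC_MAPPING case copies an ipa_ioc_macsec_info descriptor from userspace, checks that ioctl_data_size matches sizeof(struct ipa_macsec_map), copies the map itself, and forwards it to IPACM through ipa3_send_macsec_info(). A userspace usage sketch, assuming the usual /dev/ipa character node and the uapi definitions of these structures (neither is spelled out in this diff):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_ipa.h>	/* assumed uapi header: ipa_ioc_macsec_info, ipa_macsec_map */

int ipa_add_macsec_mapping(const struct ipa_macsec_map *map)
{
	struct ipa_ioc_macsec_info info;
	int fd, ret;

	fd = open("/dev/ipa", O_RDWR);	/* assumed device node name */
	if (fd < 0)
		return -1;

	memset(&info, 0, sizeof(info));
	/* ioctl_ptr is assumed to be a pointer-sized integer in the uapi header */
	info.ioctl_ptr = (uintptr_t)map;
	/* must equal sizeof(struct ipa_macsec_map) or the driver rejects the call */
	info.ioctl_data_size = sizeof(*map);

	ret = ioctl(fd, IPA_IOC_ADD_MACSEC_MAPPING, &info);
	close(fd);
	return ret;
}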

+ 107 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_dp.c

@@ -100,6 +100,7 @@ static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys);
 static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
 static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
 static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_first_replenish_rx_cache(struct ipa3_sys_context *sys);
 static void ipa3_replenish_rx_work_func(struct work_struct *work);
 static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
 static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys);
@@ -1523,7 +1524,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 			ipa3_ctx->ipa_wan_skb_page) {
 			ipa3_replenish_rx_page_recycle(ep->sys);
 		} else
-			ipa3_replenish_rx_cache(ep->sys);
+			ipa3_first_replenish_rx_cache(ep->sys);
 		for (i = 0; i < GSI_VEID_MAX; i++)
 			INIT_LIST_HEAD(&ep->sys->pending_pkts[i]);
 	}
@@ -2822,6 +2823,111 @@ fail_kmem_cache_alloc:
 	return;
 }
 
+/**
+ * ipa3_first_replenish_rx_cache() - Replenish the Rx packets cache for the first time.
+ *
+ * The function allocates buffers in the rx_pkt_wrapper_cache cache until there
+ * are IPA_RX_POOL_CEIL buffers in the cache.
+ *   - Allocate a buffer in the cache
+ *   - Initialized the packets link
+ *   - Initialize the packets work struct
+ *   - Allocate the packets socket buffer (skb)
+ *   - Fill the packets skb with data
+ *   - Make the packet DMAable
+ *   - Add the packet to the system pipe linked list
+ */
+static void ipa3_first_replenish_rx_cache(struct ipa3_sys_context *sys)
+{
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int idx = 0;
+	int rx_len_cached = 0;
+	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+	rx_len_cached = sys->len;
+
+	/* start replenish only when buffers go lower than the threshold */
+	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
+		return;
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt) {
+			IPAERR("failed to alloc cache\n");
+			goto fail_kmem_cache_alloc;
+		}
+
+		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+		rx_pkt->sys = sys;
+
+		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+		if (rx_pkt->data.skb == NULL) {
+			IPAERR("failed to alloc skb\n");
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+						     sys->rx_buff_sz,
+						     DMA_FROM_DEVICE);
+		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
+			IPAERR("dma_map_single failure %pK for %pK\n",
+			       (void *)rx_pkt->data.dma_addr, ptr);
+			goto fail_dma_mapping;
+		}
+
+		gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
+		gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
+		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
+		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
+		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
+		gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
+		gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
+		idx++;
+		rx_len_cached++;
+		/*
+		 * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_MAX.
+		 * If this size is reached we need to queue the xfers.
+		 */
+		if (idx == IPA_REPL_XFER_MAX) {
+			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
+				gsi_xfer_elem_array, false);
+			if (ret != GSI_STATUS_SUCCESS) {
+				/* we don't expect this will happen */
+				IPAERR("failed to provide buffer: %d\n", ret);
+				WARN_ON(1);
+				break;
+			}
+			idx = 0;
+		}
+	}
+	goto done;
+
+fail_dma_mapping:
+	sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	/* Ensuring minimum buffers are submitted to HW */
+	if (rx_len_cached < IPA_REPL_XFER_THRESH) {
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+				msecs_to_jiffies(1));
+		return;
+	}
+done:
+	/* only ring doorbell once here */
+	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
+		gsi_xfer_elem_array, true);
+	if (ret == GSI_STATUS_SUCCESS) {
+		sys->len = rx_len_cached;
+	} else {
+		/* we don't expect this will happen */
+		IPAERR("failed to provide buffer: %d\n", ret);
+		WARN_ON(1);
+	}
+}
 
 /**
  * ipa3_replenish_rx_cache() - Replenish the Rx packets cache.

+ 3 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_flt.c

@@ -223,6 +223,9 @@ static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
 			/* only body (no header) */
 			tbl_mem.size = tbl->sz[rlt] -
 				ipahal_get_hw_tbl_hdr_width();
+			/* Add prefetech buf size. */
+			tbl_mem.size +=
+				ipahal_get_hw_prefetch_buf_size();
 			if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
 				IPAERR("fail to alloc sys tbl of size %d\n",
 					tbl_mem.size);

+ 38 - 6
drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c

@@ -543,12 +543,13 @@ bad_len:
 static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user,
 	struct ipa3_hdr_entry **entry_out)
 {
-	struct ipa3_hdr_entry *entry;
+	struct ipa3_hdr_entry *entry, *entry_t, *next;
 	struct ipa_hdr_offset_entry *offset = NULL;
 	u32 bin;
 	struct ipa3_hdr_tbl *htbl;
 	int id;
 	int mem_size;
+	enum hdr_tbl_storage hdr_tbl_loc;
 
 	if (hdr->hdr_len > IPA_HDR_MAX_SIZE) {
 		IPAERR_RL("bad param\n");
@@ -579,6 +580,26 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user,
 			 (entry->is_partial || (hdr->status == IPA_HDR_TO_DDR_PATTERN))) ||
 			 !IPA_MEM_PART(apps_hdr_size)) ? false : true;
 
+	/* check to see if adding header entry with duplicate name */
+	for (hdr_tbl_loc = HDR_TBL_LCL; hdr_tbl_loc < HDR_TBLS_TOTAL; hdr_tbl_loc++) {
+		list_for_each_entry_safe(entry_t, next,
+			&ipa3_ctx->hdr_tbl[hdr_tbl_loc].head_hdr_entry_list, link) {
+
+			/* return if adding the same name */
+			if (!strcmp(entry_t->name, entry->name) && (user == true)) {
+				IPAERR("IPACM Trying to add hdr %s len=%d, duplicate entry, return old one\n",
+					entry->name, entry->hdr_len);
+
+				/* return the original entry */
+				if (entry_out)
+					*entry_out = entry_t;
+
+				kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+				return 0;
+			}
+		}
+	}
+
 	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
 		bin = IPA_HDR_BIN0;
 	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
@@ -616,11 +637,20 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user,
 				mem_size = IPA_MEM_PART(apps_hdr_size_ddr);
 				entry->is_lcl = false;
 			} else {
-				/* if the entry is intended to be in DDR,
-				   and there is no space -> error */
-				IPAERR("No space in DDR header buffer! Requested: %d Left: %d\n",
-				       ipa_hdr_bin_sz[bin], mem_size - htbl->end);
-				goto bad_hdr_len;
+				/* check if DDR free list */
+				if (list_empty(&htbl->head_free_offset_list[bin])) {
+					IPAERR("No space in DDR header buffer! Requested: %d Left: %d name %s, end %d\n",
+						ipa_hdr_bin_sz[bin], mem_size - htbl->end, entry->name, htbl->end);
+					goto bad_hdr_len;
+				} else {
+					/* get the first free slot */
+					offset = list_first_entry(&htbl->head_free_offset_list[bin],
+						struct ipa_hdr_offset_entry, link);
+					list_move(&offset->link, &htbl->head_offset_list[bin]);
+					entry->offset_entry = offset;
+					offset->ipacm_installed = user;
+					goto free_list;
+				}
 			}
 		}
 		offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
@@ -650,6 +680,8 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user,
 		offset->ipacm_installed = user;
 	}
 
+free_list:
+
 	list_add(&entry->link, &htbl->head_hdr_entry_list);
 	htbl->hdr_cnt++;
 	IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d to %s table\n",

+ 34 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -344,6 +344,8 @@ enum {
 
 #define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 4096
 
+#define IPA_UC_ACT_TBL_SIZE 1000
+
 #define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
 #define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
 #define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
@@ -564,6 +566,12 @@ enum ipa_icc_type {
 
 #define IPA_ICC_MAX (IPA_ICC_PATH_MAX*IPA_ICC_TYPE_MAX)
 
+
+#define IPA_MHI_CTRL_NOT_SETUP (0)
+#define IPA_MHI_CTRL_UL_SETUP (1 << 1)
+#define IPA_MHI_CTRL_DL_SETUP (1 << 2)
+#define IPA_MHI_CTRL_SETUP_ALL (IPA_MHI_CTRL_UL_SETUP | IPA_MHI_CTRL_DL_SETUP)
+
 /**
  * struct  ipa_rx_page_data - information needed
  * to send to wlan driver on receiving data from ipa hw
@@ -610,6 +618,7 @@ struct ipa_smmu_cb_ctx {
 	u32 va_end;
 	bool shared;
 	bool is_cache_coherent;
+	bool done;
 };
 
 /**
@@ -2138,6 +2147,7 @@ struct ipa_ntn3_client_stats {
  * @eth_info: ethernet client mapping
  * @max_num_smmu_cb: number of smmu s1 cb supported
  * @non_hash_flt_lcl_sys_switch: number of times non-hash flt table moved
+ * mhi_ctrl_state: state of mhi ctrl pipes
  */
 struct ipa3_context {
 	struct ipa3_char_device_context cdev;
@@ -2372,6 +2382,14 @@ struct ipa3_context {
 	bool buff_above_thresh_for_coal_pipe_notified;
 	bool buff_below_thresh_for_def_pipe_notified;
 	bool buff_below_thresh_for_coal_pipe_notified;
+	u8 mhi_ctrl_state;
+	struct ipa_mem_buffer uc_act_tbl;
+	bool uc_act_tbl_valid;
+	struct mutex act_tbl_lock;
+	int uc_act_tbl_total;
+	int uc_act_tbl_next_index;
+	int ipa_pil_load;
+
 };
 
 struct ipa3_plat_drv_res {
@@ -2784,6 +2802,12 @@ int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl);
 
 int ipa3_cfg_ep_ulso(u32 clnt_hdl, const struct ipa_ep_cfg_ulso *ep_ulso);
 
+int ipa3_setup_uc_act_tbl(void);
+
+int ipa3_add_socksv5_conn(struct ipa_socksv5_info *info);
+
+int ipa3_del_socksv5_conn(uint32_t handle);
+
 /*
  * Header removal / addition
  */
@@ -3593,4 +3617,14 @@ int ipa3_send_eogre_info(
 	enum ipa_eogre_event etype,
 	struct ipa_ioc_eogre_info *info );
 
+/* update mhi ctrl pipe state */
+void ipa3_update_mhi_ctrl_state(u8 state, bool set);
+/* Send MHI endpoint info to modem using QMI indication message */
+int ipa_send_mhi_endp_ind_to_modem(void);
+
+/*
+ * To pass macsec mapping to the IPACM
+ */
+int ipa3_send_macsec_info(enum ipa_macsec_event event_type, struct ipa_macsec_map *map);
+
 #endif /* _IPA3_I_H_ */

+ 1 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c

@@ -144,6 +144,7 @@ static const struct mhi_device_id mhi_driver_match_table[] = {
 	{ .chan = "IP_HW_MHIP_0" }, /* for rndis/Wifi teth pipes */
 	{ .chan = "IP_HW_MHIP_1" }, /* for MHIP rmnet */
 	{ .chan = "IP_HW_ADPL" }, /* ADPL/ODL DL pipe */
+	{},
 };
 
 static const char *ipa_mpm_mhip_chan_str[IPA_MPM_MHIP_CH_ID_MAX] = {
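
The single ipa_mpm.c change terminates mhi_driver_match_table with an empty entry. Match tables of this kind are walked until an all-zero sentinel, so without it a walker can run past the end of the array. A generic illustration of the lookup pattern (not the MHI core's actual code):

/* Illustration: channel lookup stops at the new {} sentinel. */
static const struct mhi_device_id *match_chan(const struct mhi_device_id *ids,
					      const char *chan)
{
	const struct mhi_device_id *id;

	for (id = ids; id->chan[0]; id++)
		if (!strcmp(id->chan, chan))
			return id;
	return NULL;
}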

+ 5 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_odl.c

@@ -372,6 +372,11 @@ int ipa3_odl_pipe_open(void)
 		return 0;
 	}
 
+	if (atomic_read(&ipa3_ctx->is_ssr)) {
+		IPAERR("SSR in progress ODL pipe configuration not allowed\n");
+		return 0;
+	}
+
 	memset(&holb_cfg, 0, sizeof(holb_cfg));
 	holb_cfg.tmr_val = 0;
 	holb_cfg.en = 1;

+ 44 - 9
drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c

@@ -932,20 +932,39 @@ int ipa3_qmi_filter_request_ex_send(
 	if (req->filter_spec_ex_list_len == 0) {
 		IPAWANDBG("IPACM pass zero rules to Q6\n");
 	} else {
-		IPAWANDBG("IPACM pass %u rules to Q6\n",
-		req->filter_spec_ex_list_len);
+		IPAWANDBG(
+		"IPACM pass %u rule to Q6\n",req->filter_spec_ex_list_len);
 	}
-
-	if (req->filter_spec_ex_list_len >= QMI_IPA_MAX_FILTERS_EX_V01) {
+	if (req->filter_spec_ex_list_valid && req->filter_spec_ex_list_len >
+					QMI_IPA_MAX_FILTERS_EX_V01) {
 		IPAWANDBG(
 		"IPACM pass the number of filtering rules exceed limit\n");
 		return -EINVAL;
 	} else if (req->source_pipe_index_valid != 0) {
 		IPAWANDBG(
-		"IPACM passes source_pipe_index_valid not zero 0 != %d\n",
+		"IPACM passes source_pipe_index_valid not zero 0 !=%d\n",
 			req->source_pipe_index_valid);
 		return -EINVAL;
 	}
+	if (req->xlat_filter_indices_list_valid &&
+		(req->xlat_filter_indices_list_len >
+				QMI_IPA_MAX_FILTERS_EX_V01)) {
+		IPAWANDBG(
+		"IPACM pass the number of filtering rules exceed limit\n");
+		return -EINVAL;
+	}
+	if (req->filter_spec_ex2_list_valid &&
+		(req->filter_spec_ex2_list_len > QMI_IPA_MAX_FILTERS_V01)) {
+		IPAWANDBG(
+		"IPACM pass the number of filtering rules exceed limit\n");
+		return -EINVAL;
+	}
+	if (req->ul_firewall_indices_list_valid &&
+		(req->ul_firewall_indices_list_len > QMI_IPA_MAX_FILTERS_V01)) {
+		IPAWANDBG(
+		"IPACM pass the number of filtering rules exceed limit\n");
+		return -EINVAL;
+	}
 
 	for (i = 0; i < req->filter_spec_ex_list_len; i++) {
 		if ((req->filter_spec_ex_list[i].ip_type !=
@@ -977,7 +996,14 @@ int ipa3_qmi_filter_request_ex_send(
 	mutex_unlock(&ipa3_qmi_lock);
 
 	req_desc.max_msg_len = ipa3_qmi_filter_request_ex_calc_length(req);
-	IPAWANDBG("QMI send request length = %d\n", req_desc.max_msg_len);
+	if( req_desc.max_msg_len < 0 ){
+		IPAWANDBG(
+		"QMI send request length = %d\n", req_desc.max_msg_len);
+		return -EINVAL;
+	} else {
+		IPAWANDBG("QMI send request length = %d\n",
+		req_desc.max_msg_len);
+	}
 
 	req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01;
 	req_desc.ei_array = ipa3_install_fltr_rule_req_ex_msg_data_v01_ei;
@@ -1746,10 +1772,13 @@ static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
 		"ipa3_qmi_init_modem_send_sync_msg failed due to SSR!\n");
 		/* Cleanup when ipa3_wwan_remove is called */
 		mutex_lock(&ipa3_qmi_lock);
-		qmi_handle_release(ipa_q6_clnt);
-		vfree(ipa_q6_clnt);
-		ipa_q6_clnt = NULL;
+		if (ipa_q6_clnt != NULL) {
+			qmi_handle_release(ipa_q6_clnt);
+			vfree(ipa_q6_clnt);
+			ipa_q6_clnt = NULL;
+		}
 		mutex_unlock(&ipa3_qmi_lock);
+		IPAWANERR("Exit from service arrive fun\n");
 		return;
 	}
 
@@ -2443,6 +2472,8 @@ int ipa3_qmi_enable_per_client_stats(
 
 	IPAWANDBG("Sending QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01\n");
 
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
 		&req_desc, req,
 		&resp_desc, resp,
@@ -2480,6 +2511,8 @@ int ipa3_qmi_get_per_client_packet_stats(
 
 	IPAWANDBG("Sending QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01\n");
 
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
 		&req_desc, req,
 		&resp_desc, resp,
@@ -2537,6 +2570,8 @@ int ipa3_qmi_send_mhi_cleanup_request(struct ipa_mhi_cleanup_req_msg_v01 *req)
 	resp_desc.msg_id = QMI_IPA_MHI_CLEANUP_RESP_V01;
 	resp_desc.ei_array = ipa_mhi_cleanup_resp_msg_v01_ei;
 
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
 	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
 		&req_desc, req,
 		&resp_desc, &resp,
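
For reference, the four list-length guards added to ipa3_qmi_filter_request_ex_send() above all share one shape: an optional TLV array is rejected when its *_valid flag is set and its *_len exceeds the wire-format maximum. A minimal sketch of that pattern as a helper (the helper name and pr_err usage are illustrative, not part of the driver):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Reject an optional QMI TLV array whose reported length exceeds the
 * wire-format maximum; a zero return means the array is safe to walk.
 */
static int qmi_opt_array_ok(bool valid, u32 len, u32 max, const char *name)
{
	if (valid && len > max) {
		pr_err("%s: %u entries exceeds the limit of %u\n",
		       name, len, max);
		return -EINVAL;
	}
	return 0;
}

Each *_valid/*_len pair in the request message would then funnel through a single call instead of a copy-pasted if block.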

+ 3 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_rt.c

@@ -170,6 +170,9 @@ static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip,
 			/* only body (no header) */
 			tbl_mem.size = tbl->sz[rlt] -
 				ipahal_get_hw_tbl_hdr_width();
+			/* Add prefetch buf size. */
+			tbl_mem.size +=
+				ipahal_get_hw_prefetch_buf_size();
 			if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
 				IPAERR_RL("fail to alloc sys tbl of size %d\n",
 					tbl_mem.size);
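
The intent of the three lines added above is easier to see as arithmetic: the system table buffer now covers the rule body plus the window the hardware rule prefetcher may read past the last rule. A sketch using the 8-byte header width and the 128-byte prefetch size introduced later in this change (the helper itself is hypothetical):

#include <linux/types.h>

/* Body-only system table allocation, padded so the rule prefetcher can
 * read IPA3_0_HW_RULE_PREFETCH_BUF_SIZE bytes past the last rule without
 * touching unmapped memory.
 */
static u32 rt_sys_tbl_alloc_size(u32 tbl_sz, u32 hdr_width, u32 prefetch_sz)
{
	return (tbl_sz - hdr_width) + prefetch_sz;
}

With the defaults above, a 1024-byte table becomes a 1144-byte allocation (1024 - 8 + 128).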

+ 374 - 12
drivers/platform/msm/ipa/ipa_v3/ipa_utils.c

@@ -8489,10 +8489,7 @@ int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
 		ep_ctrl->ipa_ep_suspend,
 		ep_ctrl->ipa_ep_delay);
 	ep = &ipa3_ctx->ep[clnt_hdl];
-	if (ep->client == IPA_CLIENT_MHI_LOW_LAT_PROD) {
-		IPAERR("WAR: DON'T SET FLOW CONTROL FOR MHI LOW LAT PIPE\n");
-		return 0;
-	}
+
 	if (ipa3_ctx->ipa_endp_delay_wa_v2 &&
 		IPA_CLIENT_IS_PROD(ep->client)) {
 
@@ -8503,7 +8500,8 @@ int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
 		 * AP controlled pipe configuring primary flow control.
 		 */
 		if (ep->client == IPA_CLIENT_USB_PROD ||
-			ep->client == IPA_CLIENT_MHI_PROD)
+			ep->client == IPA_CLIENT_MHI_PROD ||
+			ep->client == IPA_CLIENT_MHI_LOW_LAT_PROD)
 			primary_secondry = true;
 		else
 			primary_secondry = false;
@@ -12350,12 +12348,15 @@ int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base,
 
 	ehdr = (struct elf32_hdr *) firmware->data;
 	ipa_assert_on(!ehdr);
-	if (ehdr->e_phnum != 3) {
+	if (ehdr->e_phnum != 3 && ehdr->e_phnum != 5) {
 		IPAERR("Unexpected number of ELF program headers\n");
 		return -EINVAL;
 	}
+
 	phdr = (struct elf32_phdr *)(firmware->data + sizeof(*ehdr));
 
+	if (ehdr->e_phnum == 5)
+		phdr = phdr + 2;
 	/*
 	 * Each ELF program header represents a FW image and contains:
 	 *  p_vaddr : The starting address to which the FW needs to loaded.
@@ -12992,7 +12993,7 @@ int ipa3_get_prot_id(enum ipa_client_type client)
 	return prot_id;
 }
 
-void __ipa_ntn3_cons_stats_get(struct ipa_ntn3_stats_rx *stats, enum ipa_client_type client)
+void __ipa_ntn3_prod_stats_get(struct ipa_ntn3_stats_rx *stats, enum ipa_client_type client)
 {
 	int ch_id, ipa_ep_idx;
 
@@ -13017,7 +13018,7 @@ void __ipa_ntn3_cons_stats_get(struct ipa_ntn3_stats_rx *stats, enum ipa_client_
 
 }
 
-void __ipa_ntn3_prod_stats_get(struct ipa_ntn3_stats_tx *stats, enum ipa_client_type client)
+void __ipa_ntn3_cons_stats_get(struct ipa_ntn3_stats_tx *stats, enum ipa_client_type client)
 {
 	int ch_id, ipa_ep_idx;
 
@@ -13045,11 +13046,11 @@ void __ipa_ntn3_prod_stats_get(struct ipa_ntn3_stats_tx *stats, enum ipa_client_
 void ipa_eth_ntn3_get_status(struct ipa_ntn3_client_stats *s, unsigned inst_id)
 {
 	if (inst_id == 0) {
-		__ipa_ntn3_cons_stats_get(&s->rx_stats, IPA_CLIENT_ETHERNET_CONS);
-		__ipa_ntn3_prod_stats_get(&s->tx_stats, IPA_CLIENT_ETHERNET_PROD);
+		__ipa_ntn3_cons_stats_get(&s->tx_stats, IPA_CLIENT_ETHERNET_CONS);
+		__ipa_ntn3_prod_stats_get(&s->rx_stats, IPA_CLIENT_ETHERNET_PROD);
 	} else {
-		__ipa_ntn3_cons_stats_get(&s->rx_stats, IPA_CLIENT_ETHERNET2_CONS);
-		__ipa_ntn3_prod_stats_get(&s->tx_stats, IPA_CLIENT_ETHERNET2_PROD);
+		__ipa_ntn3_cons_stats_get(&s->tx_stats, IPA_CLIENT_ETHERNET2_CONS);
+		__ipa_ntn3_prod_stats_get(&s->rx_stats, IPA_CLIENT_ETHERNET2_PROD);
 	}
 
 }
@@ -13386,3 +13387,364 @@ int ipa3_send_eogre_info(
 done:
 	return res;
 }
+
+/* Send MHI endpoint info to modem using QMI indication message */
+int ipa_send_mhi_endp_ind_to_modem(void)
+{
+	struct ipa_endp_desc_indication_msg_v01 req;
+	struct ipa_ep_id_type_v01 *ep_info;
+	int ipa_mhi_prod_ep_idx =
+		ipa3_get_ep_mapping(IPA_CLIENT_MHI_LOW_LAT_PROD);
+	int ipa_mhi_cons_ep_idx =
+		ipa3_get_ep_mapping(IPA_CLIENT_MHI_LOW_LAT_CONS);
+
+	mutex_lock(&ipa3_ctx->lock);
+	/* send QMI only if the modem is up and both MHI ctrl pipes are ready */
+	if (!ipa3_ctx->is_modem_up ||
+		ipa3_ctx->mhi_ctrl_state != IPA_MHI_CTRL_SETUP_ALL) {
+		mutex_unlock(&ipa3_ctx->lock);
+		return 0;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	IPADBG("Sending MHI end point indication to modem\n");
+	memset(&req, 0, sizeof(struct ipa_endp_desc_indication_msg_v01));
+	req.ep_info_len = 2;
+	req.ep_info_valid = true;
+	req.num_eps_valid = true;
+	req.num_eps = 2;
+	ep_info = &req.ep_info[0];
+	ep_info->ep_id = ipa_mhi_cons_ep_idx;
+	ep_info->ic_type = DATA_IC_TYPE_MHI_V01;
+	ep_info->ep_type = DATA_EP_DESC_TYPE_EMB_FLOW_CTL_PROD_V01;
+	ep_info->ep_status = DATA_EP_STATUS_CONNECTED_V01;
+	ep_info = &req.ep_info[1];
+	ep_info->ep_id = ipa_mhi_prod_ep_idx;
+	ep_info->ic_type = DATA_IC_TYPE_MHI_V01;
+	ep_info->ep_type = DATA_EP_DESC_TYPE_EMB_FLOW_CTL_CONS_V01;
+	ep_info->ep_status = DATA_EP_STATUS_CONNECTED_V01;
+	return ipa3_qmi_send_endp_desc_indication(&req);
+}
+
+void ipa3_update_mhi_ctrl_state(u8 state, bool set)
+{
+	mutex_lock(&ipa3_ctx->lock);
+	if (set)
+		ipa3_ctx->mhi_ctrl_state |= state;
+	else
+		ipa3_ctx->mhi_ctrl_state &= ~state;
+	mutex_unlock(&ipa3_ctx->lock);
+	ipa_send_mhi_endp_ind_to_modem();
+}
+EXPORT_SYMBOL(ipa3_update_mhi_ctrl_state);
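
ipa3_update_mhi_ctrl_state() together with the UL/DL hooks added in ipa_mhi_client.c forms a small readiness latch: the endpoint indication is sent only once both low-latency pipes report setup. A minimal sketch, assuming the ctrl states are single-bit flags (the real IPA_MHI_CTRL_* values live in the ipa3 headers and are not shown in this diff):

#include <linux/bits.h>
#include <linux/types.h>

#define MHI_CTRL_UL_SETUP	BIT(0)	/* stand-ins for IPA_MHI_CTRL_* */
#define MHI_CTRL_DL_SETUP	BIT(1)
#define MHI_CTRL_SETUP_ALL	(MHI_CTRL_UL_SETUP | MHI_CTRL_DL_SETUP)

/* True only after both the MHI_LOW_LAT producer and consumer pipes have
 * been set up; mirrors the gate in ipa_send_mhi_endp_ind_to_modem().
 */
static bool mhi_ctrl_pipes_ready(u8 state)
{
	return (state & MHI_CTRL_SETUP_ALL) == MHI_CTRL_SETUP_ALL;
}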
+/**
+ * ipa3_setup_uc_act_tbl() - IPA setup uc_act_tbl
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_setup_uc_act_tbl(void)
+{
+	int res = 0;
+	struct ipa_mem_buffer *tbl;
+	struct ipahal_reg_nat_uc_external_cfg nat_ex_cfg;
+	struct ipahal_reg_nat_uc_shared_cfg nat_share_cfg;
+	struct ipahal_reg_conn_track_uc_external_cfg ct_ex_cfg;
+	struct ipahal_reg_conn_track_uc_shared_cfg ct_share_cfg;
+
+	/* IPA version check */
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		IPAERR("Not supported!\n");
+		return -EPERM;
+	}
+
+	if (ipa3_ctx->uc_act_tbl_valid) {
+		IPAERR("uC act tbl already allocated\n");
+		return -EEXIST;
+	}
+
+	tbl = &ipa3_ctx->uc_act_tbl;
+	/* Allocate uc act tbl */
+	tbl->size = sizeof(struct ipa_socksv5_uc_tmpl) * IPA_UC_ACT_TBL_SIZE;
+	tbl->base = dma_alloc_coherent(ipa3_ctx->pdev, tbl->size,
+		&tbl->phys_base, GFP_KERNEL);
+	if (tbl->base == NULL)
+		return -ENOMEM;
+	memset(tbl->base, 0, tbl->size);
+
+	ipa3_ctx->uc_act_tbl_valid = true;
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* LSB 32 bits*/
+	nat_ex_cfg.nat_uc_external_table_addr_lsb =
+		(u32) (tbl->phys_base & 0xFFFFFFFF);
+	ipahal_write_reg_fields(IPA_NAT_UC_EXTERNAL_CFG, &nat_ex_cfg);
+	/* MSB 16 bits */
+	nat_share_cfg.nat_uc_external_table_addr_msb =
+		(u16) (((tbl->phys_base & 0xFFFFFFFF00000000) >> 32) & 0xFFFF);
+	ipahal_write_reg_fields(IPA_NAT_UC_SHARED_CFG, &nat_share_cfg);
+
+	/* LSB 32 bits*/
+	ct_ex_cfg.conn_track_uc_external_table_addr_lsb =
+		(u32) (tbl->phys_base & 0xFFFFFFFF);
+
+	ipahal_write_reg_fields(IPA_CONN_TRACK_UC_EXTERNAL_CFG, &ct_ex_cfg);
+	/* MSB 16 bits */
+	ct_share_cfg.conn_track_uc_external_table_addr_msb =
+		(u16) (((tbl->phys_base & 0xFFFFFFFF00000000) >> 32) & 0xFFFF);
+	ipahal_write_reg_fields(IPA_CONN_TRACK_UC_SHARED_CFG, &ct_share_cfg);
+
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
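
The two register pairs programmed in ipa3_setup_uc_act_tbl() carry one DMA address split across them; roughly (helper name hypothetical, field widths as in the code above):

#include <linux/types.h>

/* Split the uC activation table address into the 32-bit LSB written to
 * the *_UC_EXTERNAL_CFG register and the 16-bit MSB written to
 * *_UC_SHARED_CFG, giving 48 bits of addressing.
 */
static void uc_act_tbl_addr_split(dma_addr_t pa, u32 *lsb, u16 *msb)
{
	*lsb = (u32)(pa & 0xFFFFFFFF);
	*msb = (u16)((pa >> 32) & 0xFFFF);
}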
+
+static void ipa3_socksv5_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	if (!buff) {
+		IPAERR("Null buffer\n");
+		return;
+	}
+
+	if (type != IPA_SOCKV5_ADD &&
+	    type != IPA_SOCKV5_DEL) {
+		IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
+		kfree(buff);
+		return;
+	}
+
+	kfree(buff);
+}
+
+/**
+ * ipa3_add_socksv5_conn() - IPA add socksv5_conn
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_socksv5_conn(struct ipa_socksv5_info *info)
+{
+	int res = 0;
+	void *rp_va, *wp_va;
+	struct ipa_socksv5_msg *socksv5_msg;
+	struct ipa_msg_meta msg_meta;
+
+	/* IPA version check */
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		IPAERR("Not supported!\n");
+		return -EPERM;
+	}
+
+	if (!ipa3_ctx->uc_act_tbl_valid) {
+		IPAERR("uC act tbl has not been allocated\n");
+		return -ENOENT;
+	}
+
+	if (!info) {
+		IPAERR("Null info\n");
+		return -EIO;
+	}
+
+	mutex_lock(&ipa3_ctx->act_tbl_lock);
+	/* check the left # of entries */
+	if (ipa3_ctx->uc_act_tbl_total
+		>= IPA_UC_ACT_TBL_SIZE) {
+		IPAERR("uc act tbl is full!\n");
+		res = -EFAULT;
+		goto error;
+	}
+
+	/* Copy the act-info into the tbl */
+	wp_va = ipa3_ctx->uc_act_tbl.base +
+		ipa3_ctx->uc_act_tbl_next_index
+			* sizeof(struct ipa_socksv5_uc_tmpl);
+
+	/* check entry valid */
+	if ((info->ul_out.cmd_id != IPA_SOCKsv5_ADD_COM_ID)
+		|| (info->dl_out.cmd_id != IPA_SOCKsv5_ADD_COM_ID)) {
+		IPAERR("cmd_id not set UL%d DL%d!\n",
+			info->ul_out.cmd_id,
+			info->dl_out.cmd_id);
+		res = -EINVAL;
+		goto error;
+	}
+
+	if ((info->ul_out.cmd_param < IPA_SOCKsv5_ADD_V6_V4_COM_PM)
+		|| (info->ul_out.cmd_param > IPA_SOCKsv5_ADD_V6_V6_COM_PM)) {
+		IPAERR("ul cmd_param %d is not supported!\n",
+			info->ul_out.cmd_param);
+		res = -EINVAL;
+		goto error;
+	}
+
+	if ((info->dl_out.cmd_param < IPA_SOCKsv5_ADD_V6_V4_COM_PM)
+		|| (info->dl_out.cmd_param > IPA_SOCKsv5_ADD_V6_V6_COM_PM)) {
+		IPAERR("dl cmd_param %d is not supported!\n",
+			info->dl_out.cmd_param);
+		res = -EINVAL;
+		goto error;
+	}
+
+	/* indicate entry valid */
+	info->ul_out.ipa_sockv5_mask |= IPA_SOCKSv5_ENTRY_VALID;
+	info->dl_out.ipa_sockv5_mask |= IPA_SOCKSv5_ENTRY_VALID;
+
+	memcpy(wp_va, &(info->ul_out), sizeof(info->ul_out));
+	memcpy(wp_va + sizeof(struct ipa_socksv5_uc_tmpl),
+		&(info->dl_out), sizeof(info->dl_out));
+
+	/* set output handle */
+	info->handle = (uint16_t) ipa3_ctx->uc_act_tbl_next_index;
+
+	ipa3_ctx->uc_act_tbl_total += 2;
+
+	/* send msg to ipacm */
+	socksv5_msg = kzalloc(sizeof(*socksv5_msg), GFP_KERNEL);
+	if (!socksv5_msg) {
+		IPAERR("socksv5_msg memory allocation failed !\n");
+		res = -ENOMEM;
+		goto error;
+	}
+	memcpy(&(socksv5_msg->ul_in), &(info->ul_in), sizeof(info->ul_in));
+	memcpy(&(socksv5_msg->dl_in), &(info->dl_in), sizeof(info->dl_in));
+	socksv5_msg->handle = info->handle;
+	socksv5_msg->ul_in.index =
+		(uint16_t) ipa3_ctx->uc_act_tbl_next_index;
+	socksv5_msg->dl_in.index =
+		(uint16_t) ipa3_ctx->uc_act_tbl_next_index + 1;
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = IPA_SOCKV5_ADD;
+	msg_meta.msg_len = sizeof(struct ipa_socksv5_msg);
+	/* post event to ipacm*/
+	res = ipa3_send_msg(&msg_meta, socksv5_msg, ipa3_socksv5_msg_free_cb);
+	if (res) {
+		IPAERR_RL("ipa3_send_msg failed: %d\n", res);
+		kfree(socksv5_msg);
+		goto error;
+	}
+
+	if (ipa3_ctx->uc_act_tbl_total < IPA_UC_ACT_TBL_SIZE) {
+		/* find next free spot */
+		do {
+			ipa3_ctx->uc_act_tbl_next_index += 2;
+			ipa3_ctx->uc_act_tbl_next_index %=
+				IPA_UC_ACT_TBL_SIZE;
+
+			rp_va =  ipa3_ctx->uc_act_tbl.base +
+				ipa3_ctx->uc_act_tbl_next_index
+					* sizeof(struct ipa_socksv5_uc_tmpl);
+
+			if (!((((struct ipa_socksv5_uc_tmpl *) rp_va)->
+				ipa_sockv5_mask) & IPA_SOCKSv5_ENTRY_VALID)) {
+				IPADBG("next available entry %d, total %d\n",
+				ipa3_ctx->uc_act_tbl_next_index,
+				ipa3_ctx->uc_act_tbl_total);
+				break;
+			}
+		} while (rp_va != wp_va);
+
+		if (rp_va == wp_va) {
+			/* set to max tbl size to debug */
+			IPAERR("can't find available spot!\n");
+			ipa3_ctx->uc_act_tbl_total = IPA_UC_ACT_TBL_SIZE;
+			res = -EFAULT;
+		}
+	}
+
+error:
+	mutex_unlock(&ipa3_ctx->act_tbl_lock);
+	return res;
+}
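
The tail of ipa3_add_socksv5_conn() is a circular scan over UL/DL entry pairs; pulled out on its own the search looks like this (is_valid() stands in for the ipa_sockv5_mask test, so this is a sketch rather than driver code):

#include <linux/types.h>

/* Advance two slots at a time (entries are consumed as UL/DL pairs),
 * wrapping at the table size, until a pair whose first entry has the
 * VALID bit clear is found or the scan returns to the slot that was
 * just written.
 */
static int find_free_pair(unsigned int start, unsigned int tbl_size,
			  bool (*is_valid)(unsigned int idx))
{
	unsigned int idx = start;

	do {
		idx = (idx + 2) % tbl_size;
		if (!is_valid(idx))
			return idx;
	} while (idx != start);

	return -1;	/* table full */
}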
+
+/**
+ * ipa3_del_socksv5_conn() - IPA delete socksv5_conn
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_socksv5_conn(uint32_t handle)
+{
+	int res = 0;
+	void *rp_va;
+	uint32_t *socksv5_handle;
+	struct ipa_msg_meta msg_meta;
+
+	/* IPA version check */
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		IPAERR("Not supported!\n");
+		return -EPERM;
+	}
+
+	if (!ipa3_ctx->uc_act_tbl_valid) {
+		IPAERR("uC act tbl has not been allocated\n");
+		return -ENOENT;
+	}
+
+	if (handle >= IPA_UC_ACT_TBL_SIZE) {
+		IPAERR("invalid handle!\n");
+		return -EINVAL;
+	}
+
+	if ((handle % 2) != 0) {
+		IPAERR("invalid handle!\n");
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->uc_act_tbl_total < 2) {
+		IPAERR("invalid handle, all tbl is empty!\n");
+		return -EINVAL;
+	}
+
+	rp_va =  ipa3_ctx->uc_act_tbl.base +
+			handle * sizeof(struct ipa_socksv5_uc_tmpl);
+
+	/* check entry is valid or not */
+	mutex_lock(&ipa3_ctx->act_tbl_lock);
+	if (!((((struct ipa_socksv5_uc_tmpl *) rp_va)->
+		ipa_sockv5_mask) & IPA_SOCKSv5_ENTRY_VALID)) {
+		IPADBG("entry %d already free\n", handle);
+	}
+
+	if (!((((struct ipa_socksv5_uc_tmpl *) (rp_va +
+		sizeof(struct ipa_socksv5_uc_tmpl)))->
+		ipa_sockv5_mask) & IPA_SOCKSv5_ENTRY_VALID)) {
+		IPADBG("entry %d already free\n", handle + 1);
+	}
+
+	((struct ipa_socksv5_uc_tmpl *) rp_va)->ipa_sockv5_mask
+		&= ~IPA_SOCKSv5_ENTRY_VALID;
+	((struct ipa_socksv5_uc_tmpl *) (rp_va +
+		sizeof(struct ipa_socksv5_uc_tmpl)))->ipa_sockv5_mask
+			&= ~IPA_SOCKSv5_ENTRY_VALID;
+	ipa3_ctx->uc_act_tbl_total -= 2;
+
+	IPADBG("free entry %d and %d, left total %d\n",
+		handle,
+		handle + 1,
+		ipa3_ctx->uc_act_tbl_total);
+
+	/* send msg to ipacm */
+	socksv5_handle = kzalloc(sizeof(*socksv5_handle), GFP_KERNEL);
+	if (!socksv5_handle) {
+		IPAERR("socksv5_handle memory allocation failed!\n");
+		res = -ENOMEM;
+		goto error;
+	}
+	memcpy(socksv5_handle, &handle, sizeof(handle));
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = IPA_SOCKV5_DEL;
+	msg_meta.msg_len = sizeof(uint32_t);
+	res = ipa3_send_msg(&msg_meta, socksv5_handle,
+		ipa3_socksv5_msg_free_cb);
+	if (res) {
+		IPAERR_RL("ipa3_send_msg failed: %d\n", res);
+		kfree(socksv5_handle);
+	}
+
+error:
+	mutex_unlock(&ipa3_ctx->act_tbl_lock);
+	return res;
+}
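
Taken together, the two new entry points give callers a handle-based lifecycle. A hypothetical caller, compiled against the ipa3 headers and holding an already populated ipa_socksv5_info, might look like:

/* Install a SOCKSv5 offload connection, keep the handle that
 * ipa3_add_socksv5_conn() writes into info->handle, and release the
 * UL/DL entry pair again once the connection goes away.
 */
static int socksv5_offload_example(struct ipa_socksv5_info *info)
{
	int ret;

	ret = ipa3_add_socksv5_conn(info);
	if (ret)
		return ret;

	/* ... offloaded traffic flows through the uC activation entries ... */

	return ipa3_del_socksv5_conn(info->handle);
}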

+ 9 - 0
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/debugfs.h>
@@ -50,6 +51,8 @@ static const char *ipahal_pkt_status_exception_to_str
 	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_NAT),
 	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT),
 	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_UCP),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_INVALID_PIPE),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_HDRI),
 	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_CSUM),
 };
 
@@ -1294,9 +1297,15 @@ static enum ipahal_pkt_status_exception pkt_status_parse_exception(
 	case 128:
 		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_UCP;
 		break;
+	case 129:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_INVALID_PIPE;
+		break;
 	case 131:
 		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_RQOS;
 		break;
+	case 136:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_HDRI;
+		break;
 	case 229:
 		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_CSUM;
 		break;

+ 3 - 0
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _IPAHAL_H_
@@ -540,7 +541,9 @@ enum ipahal_pkt_status_exception {
 	IPAHAL_PKT_STATUS_EXCEPTION_NAT,
 	IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT,
 	IPAHAL_PKT_STATUS_EXCEPTION_UCP,
+	IPAHAL_PKT_STATUS_EXCEPTION_INVALID_PIPE,
 	IPAHAL_PKT_STATUS_EXCEPTION_RQOS,
+	IPAHAL_PKT_STATUS_EXCEPTION_HDRI,
 	IPAHAL_PKT_STATUS_EXCEPTION_CSUM,
 	IPAHAL_PKT_STATUS_EXCEPTION_MAX,
 };

+ 54 - 41
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c

@@ -968,6 +968,7 @@ static int ipa_flt_gen_hw_rule_ipav5_5(
 * @flt_parse_hw_rule: Parse flt rule read from H/W
 * @eq_bitfield: Array of the bit fields of the support equations.
 *	0xFF means the equation is not supported
+* @prefetech_buf_size: Prefetch buf size;
 */
 struct ipahal_fltrt_obj {
 	bool support_hash;
@@ -997,6 +998,7 @@ struct ipahal_fltrt_obj {
 	int(*rt_parse_hw_rule)(u8 *addr, struct ipahal_rt_rule_entry *rule);
 	int(*flt_parse_hw_rule)(u8 *addr, struct ipahal_flt_rule_entry *rule);
 	u8 eq_bitfield[IPA_EQ_MAX];
+	u32 prefetech_buf_size;
 };
 
 /*
@@ -1051,6 +1053,7 @@ static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = {
 			[IPA_IS_FRAG]			= 15,
 			[IPA_IS_PURE_ACK]		= 0xFF,
 		},
+		IPA3_0_HW_RULE_PREFETCH_BUF_SIZE,
 	},
 
 	/* IPAv4 */
@@ -1096,6 +1099,7 @@ static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = {
 			[IPA_IS_FRAG]			= 15,
 			[IPA_IS_PURE_ACK]		= 0xFF,
 		},
+		IPA3_0_HW_RULE_PREFETCH_BUF_SIZE,
 	},
 
 	/* IPAv4.2 */
@@ -1141,6 +1145,7 @@ static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = {
 			[IPA_IS_FRAG]			= 15,
 			[IPA_IS_PURE_ACK]		= 0xFF,
 		},
+		IPA3_0_HW_RULE_PREFETCH_BUF_SIZE,
 	},
 
 	/* IPAv4.5 */
@@ -1186,51 +1191,53 @@ static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = {
 			[IPA_IS_FRAG]			= 15,
 			[IPA_IS_PURE_ACK]		= 0,
 		},
+		IPA3_0_HW_RULE_PREFETCH_BUF_SIZE,
 	},
 
 	/* IPAv5 */
 	[IPA_HW_v5_0] = {
-			true,
-			IPA3_0_HW_TBL_WIDTH,
-			IPA3_0_HW_TBL_SYSADDR_ALIGNMENT,
-			IPA3_0_HW_TBL_LCLADDR_ALIGNMENT,
-			IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT,
-			IPA3_0_HW_RULE_START_ALIGNMENT,
-			IPA3_0_HW_TBL_HDR_WIDTH,
-			IPA3_0_HW_TBL_ADDR_MASK,
-			IPA5_0_RULE_MAX_PRIORITY,
-			IPA5_0_RULE_MIN_PRIORITY,
-			IPA3_0_LOW_RULE_ID,
-			IPA3_0_RULE_ID_BIT_LEN,
-			IPA3_0_HW_RULE_BUF_SIZE,
-			ipa_write_64,
-			ipa_fltrt_create_flt_bitmap_v5_0,
-			ipa_fltrt_create_tbl_addr,
-			ipa_fltrt_parse_tbl_addr,
-			ipa_rt_gen_hw_rule_ipav5_0,
-			ipa_flt_gen_hw_rule_ipav5_0,
-			ipa_flt_generate_eq,
-			ipa_rt_parse_hw_rule_ipav5_0,
-			ipa_flt_parse_hw_rule_ipav5_0,
-			{
-				[IPA_TOS_EQ] = 0xFF,
-				[IPA_PROTOCOL_EQ] = 1,
-				[IPA_TC_EQ] = 2,
-				[IPA_OFFSET_MEQ128_0] = 3,
-				[IPA_OFFSET_MEQ128_1] = 4,
-				[IPA_OFFSET_MEQ32_0] = 5,
-				[IPA_OFFSET_MEQ32_1] = 6,
-				[IPA_IHL_OFFSET_MEQ32_0] = 7,
-				[IPA_IHL_OFFSET_MEQ32_1] = 8,
-				[IPA_METADATA_COMPARE] = 9,
-				[IPA_IHL_OFFSET_RANGE16_0] = 10,
-				[IPA_IHL_OFFSET_RANGE16_1] = 11,
-				[IPA_IHL_OFFSET_EQ_32] = 12,
-				[IPA_IHL_OFFSET_EQ_16] = 13,
-				[IPA_FL_EQ] = 14,
-				[IPA_IS_FRAG] = 15,
-				[IPA_IS_PURE_ACK] = 0,
-			},
+		true,
+		IPA3_0_HW_TBL_WIDTH,
+		IPA3_0_HW_TBL_SYSADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_LCLADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT,
+		IPA3_0_HW_RULE_START_ALIGNMENT,
+		IPA3_0_HW_TBL_HDR_WIDTH,
+		IPA3_0_HW_TBL_ADDR_MASK,
+		IPA5_0_RULE_MAX_PRIORITY,
+		IPA5_0_RULE_MIN_PRIORITY,
+		IPA3_0_LOW_RULE_ID,
+		IPA3_0_RULE_ID_BIT_LEN,
+		IPA3_0_HW_RULE_BUF_SIZE,
+		ipa_write_64,
+		ipa_fltrt_create_flt_bitmap_v5_0,
+		ipa_fltrt_create_tbl_addr,
+		ipa_fltrt_parse_tbl_addr,
+		ipa_rt_gen_hw_rule_ipav5_0,
+		ipa_flt_gen_hw_rule_ipav5_0,
+		ipa_flt_generate_eq,
+		ipa_rt_parse_hw_rule_ipav5_0,
+		ipa_flt_parse_hw_rule_ipav5_0,
+		{
+			[IPA_TOS_EQ] = 0xFF,
+			[IPA_PROTOCOL_EQ] = 1,
+			[IPA_TC_EQ] = 2,
+			[IPA_OFFSET_MEQ128_0] = 3,
+			[IPA_OFFSET_MEQ128_1] = 4,
+			[IPA_OFFSET_MEQ32_0] = 5,
+			[IPA_OFFSET_MEQ32_1] = 6,
+			[IPA_IHL_OFFSET_MEQ32_0] = 7,
+			[IPA_IHL_OFFSET_MEQ32_1] = 8,
+			[IPA_METADATA_COMPARE] = 9,
+			[IPA_IHL_OFFSET_RANGE16_0] = 10,
+			[IPA_IHL_OFFSET_RANGE16_1] = 11,
+			[IPA_IHL_OFFSET_EQ_32] = 12,
+			[IPA_IHL_OFFSET_EQ_16] = 13,
+			[IPA_FL_EQ] = 14,
+			[IPA_IS_FRAG] = 15,
+			[IPA_IS_PURE_ACK] = 0,
+		},
+		IPA3_0_HW_RULE_PREFETCH_BUF_SIZE,
 	},
 
 	/* IPAv5.5 */
@@ -4771,6 +4778,12 @@ u32 ipahal_get_lcl_tbl_addr_alignment(void)
 	return ipahal_fltrt_objs[ipahal_ctx->hw_type].lcladdr_alignment;
 }
 
+/* Get the H/W (flt/rt) prefetch buf size */
+u32 ipahal_get_hw_prefetch_buf_size(void)
+{
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].prefetech_buf_size;
+}
+
 /*
  * Rule priority is used to distinguish rules order
  * at the integrated table consisting from hashable and

+ 3 - 0
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h

@@ -149,6 +149,9 @@ u32 ipahal_get_hw_tbl_hdr_width(void);
  */
 u32 ipahal_get_lcl_tbl_addr_alignment(void);
 
+/* Get the H/W (flt/rt) prefetch buf size */
+u32 ipahal_get_hw_prefetch_buf_size(void);
+
 /*
  * Rule priority is used to distinguish rules order
  * at the integrated table consisting from hashable and

+ 1 - 0
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h

@@ -44,6 +44,7 @@ enum ipa_fltrt_equations {
 #define IPA3_0_HW_TBL_ADDR_MASK (127)
 #define IPA3_0_HW_RULE_BUF_SIZE (256)
 #define IPA3_0_HW_RULE_START_ALIGNMENT (7)
+#define IPA3_0_HW_RULE_PREFETCH_BUF_SIZE (128)
 
 
 /*

+ 5 - 5
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c

@@ -242,16 +242,16 @@ static int ipa_nat_ipv6ct_stringify_entry_v_4_5(const void *entry,
 	char *buff, size_t buff_size)
 {
 	int length;
-	const struct ipa_nat_hw_ipv4_entry *nat_entry =
-		(const struct ipa_nat_hw_ipv4_entry *)entry;
+	const struct ipa_nat_hw_ipv6ct_entry *ipv6ct_entry =
+		(const struct ipa_nat_hw_ipv6ct_entry *)entry;
 
 	length = ipa_nat_ipv6ct_stringify_entry_v_4_0(entry, buff, buff_size);
 
 	length += scnprintf(buff + length, buff_size - length,
 		"\t\tucp=%s address=%s uc_activation_index=%d\n",
-		(nat_entry->ucp) ? "Enabled" : "Disabled",
-		(nat_entry->s) ? "System" : "Local",
-		nat_entry->uc_activation_index);
+		(ipv6ct_entry->ucp) ? "Enabled" : "Disabled",
+		(ipv6ct_entry->s) ? "System" : "Local",
+		ipv6ct_entry->uc_activation_index);
 
 	return length;
 }

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c

@@ -3921,7 +3921,7 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
 		ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode,
 		-1, 0, 0, 0, 0, 0},
 	[IPA_HW_v4_0][IPA_QSB_MAX_READS] = {
-		ipareg_construct_qsb_max_reads_v4_0, ipareg_parse_dummy,
+		ipareg_construct_qsb_max_reads_v4_0, ipareg_parse_qsb_max_reads,
 		0x00000078, 0, 0, 0, 0, 0},
 	[IPA_HW_v4_0][IPA_FILT_ROUT_HASH_FLUSH] = {
 		ipareg_construct_dummy, ipareg_parse_dummy,

+ 3 - 30
drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c

@@ -1637,6 +1637,7 @@ static void apps_ipa_packet_receive_notify(void *priv,
 		IPAWANDBG_LOW("Rx packet was received");
 		skb->dev = IPA_NETDEV();
 		skb->protocol = htons(ETH_P_MAP);
+		skb_set_mac_header(skb, 0);
 
 		/* default traffic uses rx-0 queue. */
 		skb_record_rx_queue(skb, 0);
@@ -1666,34 +1667,6 @@ static void apps_ipa_packet_receive_notify(void *priv,
 	}
 }
 
-/* Send MHI endpoint info to modem using QMI indication message */
-static int ipa_send_mhi_endp_ind_to_modem(void)
-{
-	struct ipa_endp_desc_indication_msg_v01 req;
-	struct ipa_ep_id_type_v01 *ep_info;
-	int ipa_mhi_prod_ep_idx =
-		ipa3_get_ep_mapping(IPA_CLIENT_MHI_LOW_LAT_PROD);
-	int ipa_mhi_cons_ep_idx =
-		ipa3_get_ep_mapping(IPA_CLIENT_MHI_LOW_LAT_CONS);
-
-	memset(&req, 0, sizeof(struct ipa_endp_desc_indication_msg_v01));
-	req.ep_info_len = 2;
-	req.ep_info_valid = true;
-	req.num_eps_valid = true;
-	req.num_eps = 2;
-	ep_info = &req.ep_info[0];
-	ep_info->ep_id = ipa_mhi_cons_ep_idx;
-	ep_info->ic_type = DATA_IC_TYPE_MHI_V01;
-	ep_info->ep_type = DATA_EP_DESC_TYPE_EMB_FLOW_CTL_PROD_V01;
-	ep_info->ep_status = DATA_EP_STATUS_CONNECTED_V01;
-	ep_info = &req.ep_info[1];
-	ep_info->ep_id = ipa_mhi_prod_ep_idx;
-	ep_info->ic_type = DATA_IC_TYPE_MHI_V01;
-	ep_info->ep_type = DATA_EP_DESC_TYPE_EMB_FLOW_CTL_CONS_V01;
-	ep_info->ep_status = DATA_EP_STATUS_CONNECTED_V01;
-	return ipa3_qmi_send_endp_desc_indication(&req);
-}
-
 /* Send RSC endpoint info to modem using QMI indication message */
 static int ipa_send_wan_pipe_ind_to_modem(int ingress_eps_mask)
 {
@@ -3980,6 +3953,7 @@ static int ipa3_lcl_mdm_ssr_notifier_cb(struct notifier_block *this,
 #endif
 		IPAWANINFO("IPA received MPSS BEFORE_SHUTDOWN\n");
 		/* send SSR before-shutdown notification to IPACM */
+		ipa3_set_modem_up(false);
 		rmnet_ipa_send_ssr_notification(false);
 		atomic_set(&rmnet_ipa3_ctx->is_ssr, 1);
 		ipa3_q6_pre_shutdown_cleanup();
@@ -4004,7 +3978,6 @@ static int ipa3_lcl_mdm_ssr_notifier_cb(struct notifier_block *this,
 	case SUBSYS_AFTER_SHUTDOWN:
 #endif
 		IPAWANINFO("IPA Received MPSS AFTER_SHUTDOWN\n");
-		ipa3_set_modem_up(false);
 		/* Clean up netdev resources in AFTER_SHUTDOWN for remoteproc
 		 * enabled targets. */
 #if IS_ENABLED(CONFIG_QCOM_Q6V5_PAS)
@@ -4046,7 +4019,6 @@ static int ipa3_lcl_mdm_ssr_notifier_cb(struct notifier_block *this,
 	case SUBSYS_AFTER_POWERUP:
 #endif
 		IPAWANINFO("IPA received MPSS AFTER_POWERUP\n");
-		ipa3_set_modem_up(true);
 		if (!atomic_read(&rmnet_ipa3_ctx->is_initialized) &&
 		       atomic_read(&rmnet_ipa3_ctx->is_ssr))
 			platform_driver_register(&rmnet_ipa_driver);
@@ -5665,6 +5637,7 @@ void ipa3_q6_handshake_complete(bool ssr_bootup)
 	if (ipa3_ctx->ipa_mhi_proxy)
 		imp_handle_modem_ready();
 
+	ipa3_set_modem_up(true);
 	if (ipa3_ctx->ipa_config_is_mhi)
 		ipa_send_mhi_endp_ind_to_modem();
 }

+ 3 - 1
drivers/platform/msm/ipa/ipa_v3/teth_bridge.c

@@ -30,6 +30,8 @@
 	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__)
 #define TETH_ERR(fmt, args...) \
 	pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#define TETH_ERR_RL(fmt, args...) \
+	pr_err_ratelimited_ipa(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
 
 enum ipa_num_teth_iface {
 	IPA_TETH_IFACE_1 = 0,
@@ -76,7 +78,7 @@ static void teth_bridge_ipa_cb(void *priv, enum ipa_dp_evt_type evt,
 		return;
 	}
 
-	TETH_ERR("Unexpected exception packet from USB, dropping packet\n");
+	TETH_ERR_RL("Unexpected exception packet from USB, dropping packet\n");
 	dev_kfree_skb_any(skb);
 	TETH_DBG_FUNC_EXIT();
 }
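
The new TETH_ERR_RL macro wraps an IPA-local rate-limited printk; with the stock kernel helper the same idea looks like this (illustrative only, not the driver's definition):

#include <linux/printk.h>

/* Rate-limited error print so a burst of unexpected exception packets
 * from USB cannot flood the kernel log.
 */
#define TETH_ERR_RL_SKETCH(fmt, ...) \
	pr_err_ratelimited("ipa_teth_bridge %s:%d " fmt, \
			   __func__, __LINE__, ##__VA_ARGS__)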