ソースを参照

dfc: port the latest fixes

Port the latest fixes from msm-4.19. This includes:
- Clear status flags when entering powersave (Ifc6457f1696ef)
- Adjust QMAP query grants (I9da42a9d2425)
- fix use-after-free (I76ab4a99b3f4)

Change-Id: I1beb93d1f6fdf214c8756bd0e488914bf876aa5e
Acked-by: Weiyi Chen <[email protected]>
Signed-off-by: Subash Abhinov Kasiviswanathan <[email protected]>
Subash Abhinov Kasiviswanathan 5 年 前
コミット
2724751ae6
6 ファイル変更129 行追加23 行削除
  1. 30 0
      core/dfc.h
  2. 5 2
      core/dfc_defs.h
  3. 18 6
      core/dfc_qmap.c
  4. 62 9
      core/dfc_qmi.c
  5. 11 6
      core/qmi_rmnet.c
  6. 3 0
      core/qmi_rmnet_i.h

+ 30 - 0
core/dfc.h

@@ -262,6 +262,36 @@ TRACE_EVENT(dfc_qmap,
 		__print_hex(__get_dynamic_array(data), __entry->len))
 );
 
+TRACE_EVENT(dfc_adjust_grant,
+
+	TP_PROTO(u8 mux_id, u8 bearer_id, u32 grant, u32 rx_bytes,
+		 u32 inflight, u32 a_grant),
+
+	TP_ARGS(mux_id, bearer_id, grant, rx_bytes, inflight, a_grant),
+
+	TP_STRUCT__entry(
+		__field(u8, mux_id)
+		__field(u8, bearer_id)
+		__field(u32, grant)
+		__field(u32, rx_bytes)
+		__field(u32, inflight)
+		__field(u32, a_grant)
+	),
+
+	TP_fast_assign(
+		__entry->mux_id = mux_id;
+		__entry->bearer_id = bearer_id;
+		__entry->grant = grant;
+		__entry->rx_bytes = rx_bytes;
+		__entry->inflight = inflight;
+		__entry->a_grant = a_grant;
+	),
+
+	TP_printk("mid=%u bid=%u grant=%u rx=%u inflight=%u adjusted_grant=%u",
+		__entry->mux_id, __entry->bearer_id, __entry->grant,
+		__entry->rx_bytes, __entry->inflight, __entry->a_grant)
+);
+
 #endif /* _TRACE_DFC_H */
 
 /* This part must be outside protection */

+ 5 - 2
core/dfc_defs.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _DFC_DEFS_H
@@ -53,6 +53,8 @@ struct dfc_flow_status_info_type_v01 {
 	u16 seq_num;
 	u8 qos_ids_len;
 	struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
+	u8 rx_bytes_valid;
+	u32 rx_bytes;
 };
 
 struct dfc_ancillary_info_type_v01 {
@@ -88,7 +90,8 @@ struct dfc_tx_link_status_ind_msg_v01 {
 };
 
 void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
-			       struct dfc_flow_status_ind_msg_v01 *ind);
+			       struct dfc_flow_status_ind_msg_v01 *ind,
+			       bool is_query);
 
 void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
 				   struct dfc_tx_link_status_ind_msg_v01 *ind);

+ 18 - 6
core/dfc_qmap.c

@@ -65,13 +65,14 @@ struct qmap_dfc_ind {
 	u8			reserved2;
 	u8			tx_info_valid:1;
 	u8			tx_info:1;
-	u8			reserved3:6;
+	u8			rx_bytes_valid:1;
+	u8			reserved3:5;
 	u8			bearer_id;
 	u8			tcp_bidir:1;
 	u8			bearer_status:3;
 	u8			reserved4:4;
 	__be32			grant;
-	u32			reserved5;
+	__be32			rx_bytes;
 	u32			reserved6;
 } __aligned(1);
 
@@ -89,11 +90,12 @@ struct qmap_dfc_query_resp {
 	u8			cmd_ver;
 	u8			bearer_id;
 	u8			tcp_bidir:1;
-	u8			reserved:7;
+	u8			rx_bytes_valid:1;
+	u8			reserved:6;
 	u8			invalid:1;
 	u8			reserved2:7;
 	__be32			grant;
-	u32			reserved3;
+	__be32			rx_bytes;
 	u32			reserved4;
 } __aligned(1);
 
@@ -185,6 +187,11 @@ static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
 	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
 	qmap_flow_ind.flow_status[0].seq_num = ntohs(cmd->seq_num);
 
+	if (cmd->rx_bytes_valid) {
+		qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
+		qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
+	}
+
 	if (cmd->tcp_bidir) {
 		qmap_flow_ind.ancillary_info_valid = 1;
 		qmap_flow_ind.ancillary_info_len = 1;
@@ -193,7 +200,7 @@ static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
 		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
 	}
 
-	dfc_do_burst_flow_control(dfc, &qmap_flow_ind);
+	dfc_do_burst_flow_control(dfc, &qmap_flow_ind, false);
 
 done:
 	return QMAP_CMD_ACK;
@@ -221,6 +228,11 @@ static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
 	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
 	qmap_flow_ind.flow_status[0].seq_num = 0xFFFF;
 
+	if (cmd->rx_bytes_valid) {
+		qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
+		qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
+	}
+
 	if (cmd->tcp_bidir) {
 		qmap_flow_ind.ancillary_info_valid = 1;
 		qmap_flow_ind.ancillary_info_len = 1;
@@ -229,7 +241,7 @@ static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
 		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
 	}
 
-	dfc_do_burst_flow_control(dfc, &qmap_flow_ind);
+	dfc_do_burst_flow_control(dfc, &qmap_flow_ind, true);
 
 	return QMAP_CMD_DONE;
 }

+ 62 - 9
core/dfc_qmi.c

@@ -953,6 +953,7 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
 		bearer->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
 		bearer->last_grant = fc_info->num_bytes;
 		bearer->last_seq = fc_info->seq_num;
+		bearer->last_adjusted_grant = fc_info->num_bytes;
 
 		dfc_bearer_flow_ctl(dev, bearer, qos);
 	}
@@ -960,13 +961,40 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
 	return 0;
 }
 
+static u32 dfc_adjust_grant(struct rmnet_bearer_map *bearer,
+			    struct dfc_flow_status_info_type_v01 *fc_info)
+{
+	u32 grant;
+
+	if (!fc_info->rx_bytes_valid)
+		return fc_info->num_bytes;
+
+	if (bearer->bytes_in_flight > fc_info->rx_bytes)
+		bearer->bytes_in_flight -= fc_info->rx_bytes;
+	else
+		bearer->bytes_in_flight = 0;
+
+	/* Adjusted grant = grant - bytes_in_flight */
+	if (fc_info->num_bytes > bearer->bytes_in_flight)
+		grant = fc_info->num_bytes - bearer->bytes_in_flight;
+	else
+		grant = 0;
+
+	trace_dfc_adjust_grant(fc_info->mux_id, fc_info->bearer_id,
+			       fc_info->num_bytes, fc_info->rx_bytes,
+			       bearer->bytes_in_flight, grant);
+	return grant;
+}
+
 static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 			     u8 ack_req, u32 ancillary,
-			     struct dfc_flow_status_info_type_v01 *fc_info)
+			     struct dfc_flow_status_info_type_v01 *fc_info,
+			     bool is_query)
 {
 	struct rmnet_bearer_map *itm = NULL;
 	int rc = 0;
 	bool action = false;
+	u32 adjusted_grant;
 
 	itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);
 	if (!itm)
@@ -986,8 +1014,16 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 		if (itm->tx_off  && fc_info->num_bytes > 0)
 			return 0;
 
-		if ((itm->grant_size == 0 && fc_info->num_bytes > 0) ||
-		    (itm->grant_size > 0 && fc_info->num_bytes == 0))
+		/* Adjust grant for query */
+		if (dfc_qmap && is_query) {
+			adjusted_grant = dfc_adjust_grant(itm, fc_info);
+		} else {
+			adjusted_grant = fc_info->num_bytes;
+			itm->bytes_in_flight = 0;
+		}
+
+		if ((itm->grant_size == 0 && adjusted_grant > 0) ||
+		    (itm->grant_size > 0 && adjusted_grant == 0))
 			action = true;
 
 		/* This is needed by qmap */
@@ -995,13 +1031,24 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 			dfc_qmap_send_ack(qos, itm->bearer_id,
 					  itm->seq, DFC_ACK_TYPE_DISABLE);
 
-		itm->grant_size = fc_info->num_bytes;
-		itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
+		itm->grant_size = adjusted_grant;
+
+		/* No further query if the adjusted grant is less
+		 * than 20% of the original grant
+		 */
+		if (dfc_qmap && is_query &&
+		    itm->grant_size < (fc_info->num_bytes / 5))
+			itm->grant_thresh = itm->grant_size;
+		else
+			itm->grant_thresh =
+				qmi_rmnet_grant_per(itm->grant_size);
+
 		itm->seq = fc_info->seq_num;
 		itm->ack_req = ack_req;
 		itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
 		itm->last_grant = fc_info->num_bytes;
 		itm->last_seq = fc_info->seq_num;
+		itm->last_adjusted_grant = adjusted_grant;
 
 		if (action)
 			rc = dfc_bearer_flow_ctl(dev, itm, qos);
@@ -1011,7 +1058,8 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 }
 
 void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
-			       struct dfc_flow_status_ind_msg_v01 *ind)
+			       struct dfc_flow_status_ind_msg_v01 *ind,
+			       bool is_query)
 {
 	struct net_device *dev;
 	struct qos_info *qos;
@@ -1067,7 +1115,8 @@ void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
 				dev, qos, ack_req, ancillary, flow_status);
 		else
 			dfc_update_fc_map(
-				dev, qos, ack_req, ancillary, flow_status);
+				dev, qos, ack_req, ancillary, flow_status,
+				is_query);
 
 		spin_unlock_bh(&qos->qos_lock);
 	}
@@ -1093,6 +1142,7 @@ static void dfc_update_tx_link_status(struct net_device *dev,
 	if (itm->grant_size && !tx_status) {
 		itm->grant_size = 0;
 		itm->tcp_bidir = false;
+		itm->bytes_in_flight = 0;
 		dfc_bearer_flow_ctl(dev, itm, qos);
 	} else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
 		itm->grant_size = DEFAULT_GRANT;
@@ -1170,7 +1220,8 @@ static void dfc_qmi_ind_work(struct work_struct *work)
 		if (!dfc->restart_state) {
 			if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01)
 				dfc_do_burst_flow_control(
-						dfc, &svc_ind->d.dfc_info);
+						dfc, &svc_ind->d.dfc_info,
+						false);
 			else if (svc_ind->msg_id ==
 					QMI_DFC_TX_LINK_STATUS_IND_V01)
 				dfc_handle_tx_link_status_ind(
@@ -1445,6 +1496,8 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
 	trace_dfc_flow_check(dev->name, bearer->bearer_id,
 			     len, mark, bearer->grant_size);
 
+	bearer->bytes_in_flight += len;
+
 	if (!bearer->grant_size)
 		goto out;
 
@@ -1498,7 +1551,7 @@ void dfc_qmi_query_flow(void *dfc_data)
 	svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len;
 	memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status,
 		sizeof(resp->flow_status[0]) * resp->flow_status_len);
-	dfc_do_burst_flow_control(data, &svc_ind->d.dfc_info);
+	dfc_do_burst_flow_control(data, &svc_ind->d.dfc_info, true);
 
 done:
 	kfree(svc_ind);

+ 11 - 6
core/qmi_rmnet.c

@@ -471,7 +471,8 @@ static void qmi_rmnet_query_flows(struct qmi_info *qmi)
 	int i;
 
 	for (i = 0; i < MAX_CLIENT_NUM; i++) {
-		if (qmi->dfc_clients[i] && !dfc_qmap)
+		if (qmi->dfc_clients[i] && !dfc_qmap &&
+		    !qmi->dfc_client_exiting[i])
 			dfc_qmi_query_flow(qmi->dfc_clients[i]);
 	}
 }
@@ -547,6 +548,7 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
 			err = dfc_qmap_client_init(port, idx, &svc, qmi);
 		else
 			err = dfc_qmi_client_init(port, idx, &svc, qmi);
+		qmi->dfc_client_exiting[idx] = false;
 	}
 
 	if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
@@ -607,6 +609,7 @@ qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
 		qmi->wda_client = NULL;
 		qmi->wda_pending = NULL;
 	} else {
+		qmi->dfc_client_exiting[idx] = true;
 		qmi_rmnet_flush_ps_wq();
 	}
 
@@ -722,15 +725,18 @@ void qmi_rmnet_enable_all_flows(struct net_device *dev)
 	spin_lock_bh(&qos->qos_lock);
 
 	list_for_each_entry(bearer, &qos->bearer_head, list) {
+		bearer->seq = 0;
+		bearer->ack_req = 0;
+		bearer->bytes_in_flight = 0;
+		bearer->tcp_bidir = false;
+		bearer->rat_switch = false;
+
 		if (bearer->tx_off)
 			continue;
+
 		do_wake = !bearer->grant_size;
 		bearer->grant_size = DEFAULT_GRANT;
 		bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
-		bearer->seq = 0;
-		bearer->ack_req = 0;
-		bearer->tcp_bidir = false;
-		bearer->rat_switch = false;
 
 		if (do_wake)
 			dfc_bearer_flow_ctl(dev, bearer, qos);
@@ -989,7 +995,6 @@ int qmi_rmnet_ps_ind_deregister(void *port,
 			goto done;
 		}
 	}
-
 done:
 	return 0;
 }

+ 3 - 0
core/qmi_rmnet_i.h

@@ -48,6 +48,8 @@ struct rmnet_bearer_map {
 	u8  ack_req;
 	u32 last_grant;
 	u16 last_seq;
+	u32 bytes_in_flight;
+	u32 last_adjusted_grant;
 	bool tcp_bidir;
 	bool rat_switch;
 	bool tx_off;
@@ -92,6 +94,7 @@ struct qmi_info {
 	void *wda_pending;
 	void *dfc_clients[MAX_CLIENT_NUM];
 	void *dfc_pending[MAX_CLIENT_NUM];
+	bool dfc_client_exiting[MAX_CLIENT_NUM];
 	unsigned long ps_work_active;
 	bool ps_enabled;
 	bool dl_msg_active;