
qcacld-3.0: Call callbacks for hold req while removing it from queue

During SSR the hold req is not removed from the queue, so if HDD is
waiting for a response from WMA/FW it may time out; the shutdown ack
is then never sent to the modem, which results in a modem crash.

To avoid this, remove hold reqs from the queue while SSR is in
progress and call their callbacks.

Change-Id: I462726c524cb15ff9aa88b051467f038ab1f4e67
CRs-Fixed: 2038200
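
For readers new to this path, below is a minimal standalone C model of the
pattern the change adopts. All names here (pending_req, drain_on_shutdown,
del_sta_done) are illustrative stand-ins, not driver symbols; the real driver
uses wma_target_req, the qdf_list_* API and wma_hold_req_timer() as shown in
the diffs that follow. The idea: during SSR, unlink every queued request and
run its completion callback with a timeout status, so the waiter is released
instead of blocking until the modem shutdown-ack deadline passes.

/* Minimal model of "drain pending requests and run their callbacks on
 * shutdown". Illustrative only: no locking, and none of these names
 * exist in qcacld-3.0. */
#include <stdio.h>
#include <stdlib.h>

#define STATUS_E_TIMEOUT (-110)	/* stand-in for QDF_STATUS_E_TIMEOUT */

struct pending_req {
	int vdev_id;
	void (*complete)(struct pending_req *req, int status);
	struct pending_req *next;
};

static struct pending_req *req_queue;	/* head of the pending list */

static void enqueue(struct pending_req *req)
{
	req->next = req_queue;
	req_queue = req;
}

/* On SSR: unlink every request and complete it with a timeout status,
 * so the waiter (HDD in the real driver) is released immediately. */
static void drain_on_shutdown(void)
{
	struct pending_req *req;

	while ((req = req_queue) != NULL) {
		req_queue = req->next;
		req->complete(req, STATUS_E_TIMEOUT);
		free(req);	/* cf. qdf_mem_free() in the diff below */
	}
}

static void del_sta_done(struct pending_req *req, int status)
{
	printf("vdev %d: del-sta completed, status %d\n",
	       req->vdev_id, status);
}

int main(void)
{
	struct pending_req *req = calloc(1, sizeof(*req));

	if (!req)
		return 1;
	req->complete = del_sta_done;
	enqueue(req);
	drain_on_shutdown();	/* callback fires; nothing hangs */
	return 0;
}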
Abhishek Singh, 8 years ago
commit c614ec428e

2 changed files, 104 additions and 95 deletions:
  core/wma/src/wma_dev_if.c  (+69, -50)
  core/wma/src/wma_main.c    (+35, -45)

core/wma/src/wma_dev_if.c (+69, -50)

@@ -404,6 +404,27 @@ static struct wma_target_req *wma_find_vdev_req(tp_wma_handle wma,
 	return req_msg;
 }
 
+/**
+ * wma_send_del_sta_self_resp() - send del sta self resp to upper layer
+ * @param: del sta self response params
+ *
+ * Return: none
+ */
+static inline void wma_send_del_sta_self_resp(struct del_sta_self_params *param)
+{
+	struct scheduler_msg sme_msg = {0};
+	QDF_STATUS status;
+
+	sme_msg.type = eWNI_SME_DEL_STA_SELF_RSP;
+	sme_msg.bodyptr = param;
+
+	status = scheduler_post_msg(QDF_MODULE_ID_SME, &sme_msg);
+	if (!QDF_IS_STATUS_SUCCESS(status)) {
+		WMA_LOGE("Failed to post eWNI_SME_DEL_STA_SELF_RSP");
+		qdf_mem_free(param);
+	}
+}
+
 /**
  * wma_vdev_detach_callback() - send vdev detach response to upper layer
  * @ctx: txrx node ptr
@@ -416,8 +437,6 @@ static void wma_vdev_detach_callback(void *ctx)
 	struct wma_txrx_node *iface = (struct wma_txrx_node *)ctx;
 	struct del_sta_self_params *param;
 	struct wma_target_req *req_msg;
-	QDF_STATUS status = QDF_STATUS_SUCCESS;
-	struct scheduler_msg sme_msg = { 0 };
 
 	wma = cds_get_context(QDF_MODULE_ID_WMA);
 
@@ -454,15 +473,7 @@ static void wma_vdev_detach_callback(void *ctx)
 
 	qdf_mem_zero(iface, sizeof(*iface));
 	param->status = QDF_STATUS_SUCCESS;
-	sme_msg.type = eWNI_SME_DEL_STA_SELF_RSP;
-	sme_msg.bodyptr = param;
-	sme_msg.bodyval = 0;
-
-	status = scheduler_post_msg(QDF_MODULE_ID_SME, &sme_msg);
-	if (!QDF_IS_STATUS_SUCCESS(status)) {
-		WMA_LOGE("Failed to post eWNI_SME_ADD_STA_SELF_RSP");
-		qdf_mem_free(param);
-	}
+	wma_send_del_sta_self_resp(param);
 }
 
 
@@ -542,7 +553,6 @@ static QDF_STATUS wma_handle_vdev_detach(tp_wma_handle wma_handle,
 	uint8_t vdev_id = del_sta_self_req_param->session_id;
 	struct wma_txrx_node *iface = &wma_handle->interfaces[vdev_id];
 	struct wma_target_req *msg = NULL;
-	struct scheduler_msg sme_msg = { 0 };
 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
 
 	if (!soc) {
@@ -610,17 +620,8 @@ out:
 		qdf_mem_free(iface->staKeyParams);
 	qdf_mem_zero(iface, sizeof(*iface));
 	del_sta_self_req_param->status = status;
-	if (generate_rsp) {
-		sme_msg.type = eWNI_SME_DEL_STA_SELF_RSP;
-		sme_msg.bodyptr = del_sta_self_req_param;
-		sme_msg.bodyval = 0;
-
-		status = scheduler_post_msg(QDF_MODULE_ID_SME, &sme_msg);
-		if (!QDF_IS_STATUS_SUCCESS(status)) {
-			WMA_LOGE("Failed to post eWNI_SME_DEL_STA_SELF_RSP");
-			qdf_mem_free(del_sta_self_req_param);
-		}
-	}
+	if (generate_rsp)
+		wma_send_del_sta_self_resp(del_sta_self_req_param);
 	return status;
 }
 /**
@@ -2549,23 +2550,48 @@ void wma_hold_req_timer(void *data)
 		}
 	} else if ((tgt_req->msg_type == WMA_DELETE_STA_REQ) &&
 		(tgt_req->type == WMA_DEL_P2P_SELF_STA_RSP_START)) {
+		struct del_sta_self_rsp_params *del_sta;
+		del_sta = (struct del_sta_self_rsp_params *)tgt_req->user_data;
+		del_sta->self_sta_param->status = QDF_STATUS_E_TIMEOUT;
 		WMA_LOGA(FL("wma delete sta p2p request timed out"));
-		QDF_ASSERT(0);
+
+		if (wma_crash_on_fw_timeout(wma->fw_timeout_crash)) {
+			QDF_BUG(0);
+		} else {
+			if (del_sta->generate_rsp)
+				wma_send_del_sta_self_resp(
+					del_sta->self_sta_param);
+		}
+		qdf_mem_free(tgt_req->user_data);
 	} else if ((tgt_req->msg_type == WMA_DELETE_STA_REQ) &&
 			(tgt_req->type == WMA_SET_LINK_PEER_RSP)) {
+		tpLinkStateParams params =
+			(tpLinkStateParams) tgt_req->user_data;
+
+		params->status = false;
 		WMA_LOGA(FL("wma delete peer for set link timed out"));
 		if (wma_crash_on_fw_timeout(wma->fw_timeout_crash) == true)
 			QDF_BUG(0);
+		else
+			wma_send_msg(wma, WMA_SET_LINK_STATE_RSP,
+					params, 0);
 	} else if ((tgt_req->msg_type == WMA_DELETE_STA_REQ) &&
 			(tgt_req->type == WMA_DELETE_PEER_RSP)) {
-		 WMA_LOGE(FL("wma delete peer for del bss req timed out"));
+		tpDeleteBssParams params =
+			(tpDeleteBssParams) tgt_req->user_data;
+
+		params->status = QDF_STATUS_E_TIMEOUT;
+		WMA_LOGE(FL("wma delete peer for del bss req timed out"));
 		if (wma_crash_on_fw_timeout(wma->fw_timeout_crash) == true)
 			QDF_BUG(0);
+		else
+			wma_send_msg(wma, WMA_DELETE_BSS_RSP, params, 0);
 	} else {
 		WMA_LOGE(FL("Unhandled timeout for msg_type:%d and type:%d"),
 				tgt_req->msg_type, tgt_req->type);
 		QDF_BUG(0);
 	}
+	qdf_mem_free(tgt_req);
 }
 
 /**
@@ -2586,6 +2612,9 @@ struct wma_target_req *wma_fill_hold_req(tp_wma_handle wma,
 	struct wma_target_req *req;
 	QDF_STATUS status;
 
+	if (cds_is_driver_recovering())
+		return NULL;
+
 	req = qdf_mem_malloc(sizeof(*req));
 	if (!req) {
 		WMA_LOGE(FL("Failed to allocate memory for msg %d vdev %d"),
@@ -2594,15 +2623,11 @@ struct wma_target_req *wma_fill_hold_req(tp_wma_handle wma,
 	}
 
 	WMA_LOGD(FL("vdev_id %d msg %d type %d"), vdev_id, msg_type, type);
-
+	qdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
 	req->vdev_id = vdev_id;
 	req->msg_type = msg_type;
 	req->type = type;
 	req->user_data = params;
-	qdf_mc_timer_init(&req->event_timeout, QDF_TIMER_TYPE_SW,
-			  wma_hold_req_timer, req);
-	qdf_mc_timer_start(&req->event_timeout, timeout);
-	qdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
 	status = qdf_list_insert_back(&wma->wma_hold_req_queue, &req->node);
 	if (QDF_STATUS_SUCCESS != status) {
 		qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
@@ -2611,6 +2636,9 @@ struct wma_target_req *wma_fill_hold_req(tp_wma_handle wma,
 		return NULL;
 	}
 	qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+	qdf_mc_timer_init(&req->event_timeout, QDF_TIMER_TYPE_SW,
+			  wma_hold_req_timer, req);
+	qdf_mc_timer_start(&req->event_timeout, timeout);
 	return req;
 }
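
/*
 * Editor's note on the wma_fill_hold_req() hunks above (reasoning
 * inferred from the diff, not stated in the commit message): the
 * event_timeout timer is now initialized and started only after the
 * request has been inserted into wma_hold_req_queue under the lock.
 * In the old order, a sufficiently short timeout could fire
 * wma_hold_req_timer() before the node was queued, so the handler
 * could not locate and unlink the request. The new
 * cds_is_driver_recovering() check likewise keeps fresh requests out
 * of the queue while the SSR drain in wma_main.c is emptying it.
 */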
 
@@ -2653,9 +2681,8 @@ void wma_vdev_resp_timer(void *data)
 	struct cdp_pdev *pdev;
 	struct wma_target_req *msg;
 	uint8_t peer_id;
+	int status;
 	void *peer;
-	QDF_STATUS status = QDF_STATUS_SUCCESS;
-	struct scheduler_msg sme_msg = { 0 };
 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
 #ifdef FEATURE_AP_MCC_CH_AVOIDANCE
 	tpAniSirGlobal mac_ctx = cds_get_context(QDF_MODULE_ID_PE);
@@ -2808,20 +2835,10 @@ void wma_vdev_resp_timer(void *data)
 		params->status = QDF_STATUS_E_TIMEOUT;
 
 		WMA_LOGA("%s: WMA_DEL_STA_SELF_REQ timedout", __func__);
-		if (wma_crash_on_fw_timeout(wma->fw_timeout_crash) == true) {
+		if (wma_crash_on_fw_timeout(wma->fw_timeout_crash) == true)
 			QDF_BUG(0);
-		} else {
-			sme_msg.type = eWNI_SME_DEL_STA_SELF_RSP;
-			sme_msg.bodyptr = iface->del_staself_req;
-			sme_msg.bodyval = 0;
-
-			status = scheduler_post_msg(QDF_MODULE_ID_SME,
-						     &sme_msg);
-			if (!QDF_IS_STATUS_SUCCESS(status)) {
-				WMA_LOGE("Failed to post eWNI_SME_ADD_STA_SELF_RSP");
-				qdf_mem_free(iface->del_staself_req);
-			}
-		}
+		else
+			wma_send_del_sta_self_resp(iface->del_staself_req);
 		if (iface->addBssStaContext)
 			qdf_mem_free(iface->addBssStaContext);
 		if (iface->staKeyParams)
@@ -2898,6 +2915,9 @@ struct wma_target_req *wma_fill_vdev_req(tp_wma_handle wma,
 	struct wma_target_req *req;
 	QDF_STATUS status;
 
+	if (cds_is_driver_recovering())
+		return NULL;
+
 	req = qdf_mem_malloc(sizeof(*req));
 	if (!req) {
 		WMA_LOGE("%s: Failed to allocate memory for msg %d vdev %d",
@@ -2906,14 +2926,11 @@ struct wma_target_req *wma_fill_vdev_req(tp_wma_handle wma,
 	}
 
 	WMA_LOGD("%s: vdev_id %d msg %d", __func__, vdev_id, msg_type);
+	qdf_spin_lock_bh(&wma->vdev_respq_lock);
 	req->vdev_id = vdev_id;
 	req->msg_type = msg_type;
 	req->type = type;
 	req->user_data = params;
-	qdf_mc_timer_init(&req->event_timeout, QDF_TIMER_TYPE_SW,
-			  wma_vdev_resp_timer, req);
-	qdf_mc_timer_start(&req->event_timeout, timeout);
-	qdf_spin_lock_bh(&wma->vdev_respq_lock);
 	status = qdf_list_insert_back(&wma->vdev_resp_queue, &req->node);
 	if (QDF_STATUS_SUCCESS != status) {
 		qdf_spin_unlock_bh(&wma->vdev_respq_lock);
@@ -2922,8 +2939,10 @@ struct wma_target_req *wma_fill_vdev_req(tp_wma_handle wma,
 		qdf_mem_free(req);
 		return NULL;
 	}
-
 	qdf_spin_unlock_bh(&wma->vdev_respq_lock);
+	qdf_mc_timer_init(&req->event_timeout, QDF_TIMER_TYPE_SW,
+			  wma_vdev_resp_timer, req);
+	qdf_mc_timer_start(&req->event_timeout, timeout);
 	return req;
 }
 

core/wma/src/wma_main.c (+35, -45)

@@ -1701,7 +1701,6 @@ static void wma_cleanup_vdev_resp_queue(tp_wma_handle wma)
 {
 	struct wma_target_req *req_msg = NULL;
 	qdf_list_node_t *node1 = NULL;
-	QDF_STATUS status;
 
 	qdf_spin_lock_bh(&wma->vdev_respq_lock);
 	if (!qdf_list_size(&wma->vdev_resp_queue)) {
@@ -1710,23 +1709,48 @@ static void wma_cleanup_vdev_resp_queue(tp_wma_handle wma)
 		return;
 	}
 
-	while (qdf_list_peek_front(&wma->vdev_resp_queue, &node1) !=
+	while (qdf_list_peek_front(&wma->vdev_resp_queue, &node1) ==
 				   QDF_STATUS_SUCCESS) {
 		req_msg = qdf_container_of(node1, struct wma_target_req, node);
-		status = qdf_list_remove_node(&wma->vdev_resp_queue, node1);
 		qdf_spin_unlock_bh(&wma->vdev_respq_lock);
-		if (status != QDF_STATUS_SUCCESS) {
-			WMA_LOGE(FL("Failed to remove req vdev_id %d type %d"),
-				 req_msg->vdev_id, req_msg->type);
-			return;
-		}
-		qdf_mc_timer_destroy(&req_msg->event_timeout);
-		qdf_mem_free(req_msg);
+		qdf_mc_timer_stop(&req_msg->event_timeout);
+		wma_vdev_resp_timer(req_msg);
 		qdf_spin_lock_bh(&wma->vdev_respq_lock);
 	}
 	qdf_spin_unlock_bh(&wma->vdev_respq_lock);
 }
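
/*
 * Editor's note on the loop condition above: before this change the
 * test was "peek_front(...) != QDF_STATUS_SUCCESS", so with a
 * non-empty queue the peek succeeded and the body never ran -- the
 * vdev resp queue was never actually drained. With "==", each pass
 * hands the request to wma_vdev_resp_timer(), which synthesizes the
 * response toward the upper layer. The removed wma_cleanup_hold_req()
 * further down had the same inverted test.
 */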
 
+/**
+ * wma_cleanup_hold_req() - cleanup hold request queue
+ * @wma: wma handle
+ *
+ * Return: none
+ */
+static void wma_cleanup_hold_req(tp_wma_handle wma)
+{
+	struct wma_target_req *req_msg = NULL;
+	qdf_list_node_t *node1 = NULL;
+
+	qdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
+	if (!qdf_list_size(&wma->wma_hold_req_queue)) {
+		qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+		WMA_LOGD(FL("request queue is empty"));
+		return;
+	}
+
+	while (QDF_STATUS_SUCCESS ==
+			qdf_list_peek_front(&wma->wma_hold_req_queue, &node1)) {
+		req_msg = qdf_container_of(node1, struct wma_target_req, node);
+		qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+		/* Cleanup timeout handler */
+		qdf_mc_timer_stop(&req_msg->event_timeout);
+		qdf_mc_timer_destroy(&req_msg->event_timeout);
+		wma_hold_req_timer(req_msg);
+		qdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
+	}
+	qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+}
+
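
/*
 * Editor's note on wma_cleanup_hold_req() above (behaviour of
 * wma_hold_req_timer() inferred from the wma_dev_if.c hunks): the
 * lock is dropped before invoking the timeout handler because the
 * handler itself looks the request up and unlinks it from
 * wma_hold_req_queue, and now frees tgt_req at the end. That unlink
 * is what lets the peek-front loop here make progress and terminate.
 */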
 /**
  * wma_shutdown_notifier_cb - Shutdown notifer call back
  * @priv : WMA handle
@@ -1745,6 +1769,7 @@ static void wma_shutdown_notifier_cb(void *priv)
 
 	qdf_event_set(&wma_handle->wma_resume_event);
 	wma_cleanup_vdev_resp_queue(wma_handle);
+	wma_cleanup_hold_req(wma_handle);
 	pmo_ucfg_psoc_wakeup_host_event_received(wma_handle->psoc);
 }
 
@@ -3300,41 +3325,6 @@ end:
 	return qdf_status;
 }
 
-/**
- * wma_cleanup_hold_req() - cleanup hold request queue
- * @wma: wma handle
- *
- * Return: none
- */
-static void wma_cleanup_hold_req(tp_wma_handle wma)
-{
-	struct wma_target_req *req_msg = NULL;
-	qdf_list_node_t *node1 = NULL;
-	QDF_STATUS status;
-
-	qdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
-	if (!qdf_list_size(&wma->wma_hold_req_queue)) {
-		qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
-		WMA_LOGD(FL("request queue is empty"));
-		return;
-	}
-
-	while (QDF_STATUS_SUCCESS !=
-			qdf_list_peek_front(&wma->wma_hold_req_queue, &node1)) {
-		req_msg = qdf_container_of(node1, struct wma_target_req, node);
-		status = qdf_list_remove_node(&wma->wma_hold_req_queue, node1);
-		if (QDF_STATUS_SUCCESS != status) {
-			qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
-			WMA_LOGE(FL("Failed to remove request for vdev_id %d type %d"),
-				 req_msg->vdev_id, req_msg->type);
-			return;
-		}
-		qdf_mc_timer_destroy(&req_msg->event_timeout);
-		qdf_mem_free(req_msg);
-	}
-	qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
-}
-
 /**
  * wma_wmi_service_close() - close wma wmi service interface.
  * @cds_ctx: cds context