@@ -1681,6 +1681,39 @@ QDF_STATUS wma_set_peer_param(void *wma_ctx, uint8_t *peer_addr,
 	return err;
 }
 
+/**
+ * wma_peer_unmap_conf_send - send peer unmap conf cmd to fw
+ * @wma: wma handle
+ * @msg: peer unmap conf params
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS wma_peer_unmap_conf_send(tp_wma_handle wma,
+				    struct send_peer_unmap_conf_params *msg)
+{
+	QDF_STATUS qdf_status;
+
+	if (!msg) {
+		WMA_LOGE("%s: null input params", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	qdf_status = wmi_unified_peer_unmap_conf_send(
+					wma->wmi_handle,
+					msg->vdev_id,
+					msg->peer_id_cnt,
+					msg->peer_id_list);
+
+	if (qdf_status != QDF_STATUS_SUCCESS)
+		WMA_LOGE("%s: peer_unmap_conf_send failed %d",
+			 __func__, qdf_status);
+
+	qdf_mem_free(msg->peer_id_list);
+	msg->peer_id_list = NULL;
+
+	return qdf_status;
+}
+
 /**
  * wma_peer_unmap_conf_cb - send peer unmap conf cmnd to fw
  * @vdev_id: vdev id
@@ -1689,22 +1722,75 @@ QDF_STATUS wma_set_peer_param(void *wma_ctx, uint8_t *peer_addr,
  *
  * Return: QDF_STATUS
  */
-
 QDF_STATUS wma_peer_unmap_conf_cb(uint8_t vdev_id,
 				  uint32_t peer_id_cnt,
 				  uint16_t *peer_id_list)
 {
 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
+	QDF_STATUS qdf_status;
 
 	if (!wma) {
-		WMA_LOGD("%s: peer_id_cnt: %d, null wma_handle",
+		WMA_LOGE("%s: peer_id_cnt: %d, null wma_handle",
 			 __func__, peer_id_cnt);
 		return QDF_STATUS_E_INVAL;
 	}
 
-	return wmi_unified_peer_unmap_conf_send(wma->wmi_handle,
+	qdf_status = wmi_unified_peer_unmap_conf_send(
+					wma->wmi_handle,
 					vdev_id, peer_id_cnt,
 					peer_id_list);
+
+	if (qdf_status == QDF_STATUS_E_BUSY) {
+		QDF_STATUS retcode;
+		struct scheduler_msg msg = {0};
+		struct send_peer_unmap_conf_params *peer_unmap_conf_req;
+		void *mac_ctx = cds_get_context(QDF_MODULE_ID_PE);
+
+		WMA_LOGD("%s: post unmap_conf cmd to MC thread", __func__);
+
+		if (!mac_ctx) {
+			WMA_LOGE("%s: mac_ctx is NULL", __func__);
+			return QDF_STATUS_E_FAILURE;
+		}
+
+		peer_unmap_conf_req = qdf_mem_malloc(sizeof(
+					struct send_peer_unmap_conf_params));
+
+		if (!peer_unmap_conf_req) {
+			WMA_LOGE("%s: peer_unmap_conf_req memory alloc failed",
+				 __func__);
+			return QDF_STATUS_E_NOMEM;
+		}
+
+		peer_unmap_conf_req->vdev_id = vdev_id;
+		peer_unmap_conf_req->peer_id_cnt = peer_id_cnt;
+		peer_unmap_conf_req->peer_id_list = qdf_mem_malloc(
+					sizeof(uint16_t) * peer_id_cnt);
+		if (!peer_unmap_conf_req->peer_id_list) {
+			WMA_LOGE("%s: peer_id_list memory alloc failed",
+				 __func__);
+			qdf_mem_free(peer_unmap_conf_req);
+			peer_unmap_conf_req = NULL;
+			return QDF_STATUS_E_NOMEM;
+		}
+		qdf_mem_copy(peer_unmap_conf_req->peer_id_list,
+			     peer_id_list, sizeof(uint16_t) * peer_id_cnt);
+
+		msg.type = WMA_SEND_PEER_UNMAP_CONF;
+		msg.reserved = 0;
+		msg.bodyptr = peer_unmap_conf_req;
+		msg.bodyval = 0;
+
+		retcode = wma_post_ctrl_msg(mac_ctx, &msg);
+		if (retcode != QDF_STATUS_SUCCESS) {
+			WMA_LOGE("%s: wma_post_ctrl_msg failed", __func__);
+			qdf_mem_free(peer_unmap_conf_req->peer_id_list);
+			qdf_mem_free(peer_unmap_conf_req);
+			return QDF_STATUS_E_FAILURE;
+		}
+	}
+
+	return qdf_status;
 }
 
 /**