Parcourir la source

qcacld-3.0: Add support for NDP Responder Request

This is qcacld-2.0 to qcacld-3.0 propagation

Add implementation for non-auto accept NDP responder request.

Change-Id: I2ced33cfa0faf487d8e31275493e8d85251e8c59
CRs-Fixed: 962367
Abhishek Singh il y a 8 ans
Parent
commit
4fef747c51

+ 288 - 1
core/hdd/src/wlan_hdd_nan_datapath.c

@@ -563,7 +563,110 @@ static int hdd_ndp_initiator_req_handler(hdd_context_t *hdd_ctx,
 static int hdd_ndp_responder_req_handler(hdd_context_t *hdd_ctx,
 						struct nlattr **tb)
 {
-	return 0;
+	hdd_adapter_t *adapter;
+	char *iface_name;
+	struct ndp_responder_req req = {0};
+	QDF_STATUS status;
+	int ret = 0;
+	struct nan_datapath_ctx *ndp_ctx;
+	uint32_t ndp_qos_cfg;
+
+	ENTER();
+
+	if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]) {
+		hddLog(LOGE, FL("Interface name string is unavailable"));
+		return -EINVAL;
+	}
+
+	iface_name = nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]);
+	/* Check if there is already an existing NAN interface */
+	adapter = hdd_get_adapter(hdd_ctx, QDF_NDI_MODE);
+	if (!adapter) {
+		hddLog(LOGE,
+			FL("NAN data interface %s not available"), iface_name);
+		return -EINVAL;
+	}
+
+	if (QDF_NDI_MODE != adapter->device_mode) {
+		hddLog(LOGE,
+			FL("Interface %s not in NDI mode"), iface_name);
+		return -EINVAL;
+	}
+
+	/* NAN data path coexists only with STA interface */
+	if (!hdd_is_ndp_allowed(hdd_ctx)) {
+		hddLog(LOGE, FL("Unsupported concurrency for NAN datapath"));
+		return -EINVAL;
+	}
+
+	ndp_ctx = WLAN_HDD_GET_NDP_CTX_PTR(adapter);
+
+	if (ndp_ctx->state == NAN_DATA_NDI_DELETED_STATE ||
+	    ndp_ctx->state == NAN_DATA_NDI_DELETING_STATE ||
+	    ndp_ctx->state == NAN_DATA_NDI_CREATING_STATE) {
+		hddLog(LOGE,
+			FL("Data request not allowed in current NDI state: %d"),
+			ndp_ctx->state);
+		return -EAGAIN;
+	}
+
+	req.vdev_id = adapter->sessionId;
+
+	if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]) {
+		hddLog(LOGE, FL("Transaction ID is unavailable"));
+		return -EINVAL;
+	}
+	req.transaction_id =
+		nla_get_u16(tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]);
+
+	if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID]) {
+		hddLog(LOGE, FL("Instance ID is unavailable"));
+		return -EINVAL;
+	}
+	req.ndp_instance_id =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID]);
+
+	if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE]) {
+		hddLog(LOGE, FL("ndp_rsp is unavailable"));
+		return -EINVAL;
+	}
+	req.ndp_rsp = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE]);
+
+	if (tb[QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO]) {
+		req.ndp_info.ndp_app_info_len =
+			nla_len(tb[QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO]);
+		if (req.ndp_info.ndp_app_info_len) {
+			req.ndp_info.ndp_app_info =
+				nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO]);
+		}
+	} else {
+		hddLog(LOG1, FL("NDP app info is unavailable"));
+	}
+	if (tb[QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS]) {
+		/* at present ndp config stores 4 bytes QOS info only */
+		req.ndp_config.ndp_cfg_len = 4;
+		ndp_qos_cfg =
+			nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS]);
+		req.ndp_config.ndp_cfg = (uint8_t *)&ndp_qos_cfg;
+	} else {
+		hddLog(LOG1, FL("NDP config data is unavailable"));
+	}
+
+	hddLog(LOG1,
+		FL("vdev_id: %d, transaction_id: %d, ndp_rsp %d, ndp_instance_id: %d, ndp_app_info_len: %d"),
+		req.vdev_id, req.transaction_id, req.ndp_rsp,
+		req.ndp_instance_id, req.ndp_info.ndp_app_info_len);
+
+	status = sme_ndp_responder_req_handler(hdd_ctx->hHal, &req);
+	if (status != QDF_STATUS_SUCCESS) {
+		hddLog(LOGE,
+			FL("sme_ndp_initiator_req_handler failed, status: %d"),
+			status);
+		ret = -EINVAL;
+	}
+
+	EXIT();
+	return ret;
 }
 
 /**
@@ -1126,12 +1229,135 @@ ndp_confirm_nla_failed:
  * @adapter: pointer to adapter context
  * @ind_params: indication parameters
  *
+ * Following vendor event is sent to cfg80211:
+ * QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD =
+ *         QCA_WLAN_VENDOR_ATTR_NDP_REQUEST_IND (4 bytes)
+ * QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR (IFNAMSIZ)
+ * QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID (2 bytes)
+ * QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR (6 bytes)
+ * QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR (6 bytes)
+ * QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID (4 bytes)
+ * QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO (ndp_app_info_len size)
+ * QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS (4 bytes)
+ *
  * Return: none
  */
 static void hdd_ndp_indication_handler(hdd_adapter_t *adapter,
 						void *ind_params)
 {
+	struct sk_buff *vendor_event;
+	hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
+	struct ndp_indication_event *event = ind_params;
+	uint32_t ndp_qos_config;
+	struct nan_datapath_ctx *ndp_ctx;
+	uint16_t data_len;
+
+	ENTER();
+	if (!ind_params) {
+		hddLog(LOGE, FL("Invalid NDP Indication"));
+		return;
+	}
+
+	if (0 != wlan_hdd_validate_context(hdd_ctx))
+		return;
+
+	/* Handle only if adapter is in NDI mode */
+	if (QDF_NDI_MODE != adapter->device_mode) {
+		hddLog(LOGE, FL("Adapter is not in NDI mode"));
+		return;
+	}
+
+	hddLog(LOG1,
+		FL("NDP Indication, policy: %d"), event->policy);
+
+	/* Policy check */
+	if (!WLAN_HDD_IS_NDP_ENABLED(hdd_ctx)) {
+		hddLog(LOGE, FL("NAN datapath is not suported"));
+		return;
+	}
+
+	/* NAN data path coexists only with STA interface */
+	if (!hdd_is_ndp_allowed(hdd_ctx)) {
+		hddLog(LOGE, FL("Unsupported concurrency for NAN datapath"));
+		return;
+	}
+
+	ndp_ctx = WLAN_HDD_GET_NDP_CTX_PTR(adapter);
+
+	/* check if we are in middle of deleting/creating the interface */
+	if (ndp_ctx->state == NAN_DATA_NDI_DELETED_STATE ||
+	    ndp_ctx->state == NAN_DATA_NDI_DELETING_STATE ||
+	    ndp_ctx->state == NAN_DATA_NDI_CREATING_STATE) {
+		hddLog(LOGE,
+			FL("Data request not allowed in current NDI state: %d"),
+			ndp_ctx->state);
+		return;
+	}
+
+	data_len = 3 * sizeof(uint32_t) + sizeof(uint16_t) +
+		2 * QDF_MAC_ADDR_SIZE + IFNAMSIZ +
+		event->ndp_info.ndp_app_info_len + 8 * NLA_HDRLEN +
+		NLMSG_HDRLEN;
+
+	/* notify response to the upper layer */
+	vendor_event = cfg80211_vendor_event_alloc(hdd_ctx->wiphy,
+					NULL, data_len,
+					QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX,
+					GFP_KERNEL);
+	if (!vendor_event) {
+		hddLog(LOGE, FL("cfg80211_vendor_event_alloc failed"));
+		return;
+	}
+
+	if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD,
+	   QCA_WLAN_VENDOR_ATTR_NDP_REQUEST_IND))
+		goto ndp_indication_nla_failed;
+
+	if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR,
+	   IFNAMSIZ, adapter->dev->name))
+		goto ndp_indication_nla_failed;
+
+	if (nla_put_u16(vendor_event,
+	   QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID,
+	   event->service_instance_id))
+		goto ndp_indication_nla_failed;
+
+	if (nla_put(vendor_event,
+	   QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR,
+	   QDF_MAC_ADDR_SIZE, event->peer_mac_addr.bytes))
+		goto ndp_indication_nla_failed;
+
+	if (nla_put(vendor_event,
+	   QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR ,
+	   QDF_MAC_ADDR_SIZE, event->peer_discovery_mac_addr.bytes))
+		goto ndp_indication_nla_failed;
+
+	if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID,
+	   event->ndp_instance_id))
+		goto ndp_indication_nla_failed;
+
+	if (event->ndp_info.ndp_app_info_len)
+		if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO,
+		   event->ndp_info.ndp_app_info_len,
+		   event->ndp_info.ndp_app_info))
+			goto ndp_indication_nla_failed;
+
+	if (event->ndp_config.ndp_cfg_len) {
+		ndp_qos_config = *((uint32_t *)event->ndp_config.ndp_cfg);
+		/* at present ndp config stores 4 bytes QOS info only */
+		if (nla_put_u32(vendor_event,
+		   QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS,
+		   ndp_qos_config))
+			goto ndp_indication_nla_failed;
+	}
+
+	cfg80211_vendor_event(vendor_event, GFP_KERNEL);
+	EXIT();
 	return;
+ndp_indication_nla_failed:
+	hddLog(LOGE, FL("nla_put api failed"));
+	kfree_skb(vendor_event);
+	EXIT();
 }
 
 /**
@@ -1139,12 +1365,73 @@ static void hdd_ndp_indication_handler(hdd_adapter_t *adapter,
  * @adapter: pointer to adapter context
  * @rsp_params: response parameters
  *
+ * Following vendor event is sent to cfg80211:
+ * QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD =
+ *         QCA_WLAN_VENDOR_ATTR_NDP_RESPONDER_RESPONSE (4 bytes)
+ * QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID (2 bytes)
+ * QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_TYPE (4 bytes)
+ * QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE (4 bytes)
+ *
  * Return: none
  */
 static void hdd_ndp_responder_rsp_handler(hdd_adapter_t *adapter,
 							void *rsp_params)
 {
+	struct sk_buff *vendor_event;
+	hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
+	struct ndp_responder_rsp_event *rsp = rsp_params;
+	uint16_t data_len;
+
+	ENTER();
+	if (!rsp) {
+		hddLog(LOGE, FL("Invalid NDP Responder response"));
+		return;
+	}
+
+	if (0 != wlan_hdd_validate_context(hdd_ctx))
+		return;
+
+	hddLog(LOG1,
+		FL("NDP Responder,vdev id %d transaction_id %d status code: %d reason %d"),
+		rsp->vdev_id, rsp->transaction_id,
+		rsp->status, rsp->reason);
+
+	data_len = 3 * sizeof(uint32_t) + sizeof(uint16_t) +
+		4 * NLA_HDRLEN + NLMSG_HDRLEN;
+	/* notify response to the upper layer */
+	vendor_event = cfg80211_vendor_event_alloc(hdd_ctx->wiphy,
+					NULL, data_len,
+					QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX,
+					GFP_KERNEL);
+	if (!vendor_event) {
+		hddLog(LOGE, FL("cfg80211_vendor_event_alloc failed"));
+		return;
+	}
+
+	if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD,
+	   QCA_WLAN_VENDOR_ATTR_NDP_RESPONDER_RESPONSE))
+		goto ndp_responder_rsp_nla_failed;
+
+	if (nla_put_u16(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID,
+	   rsp->transaction_id))
+		goto ndp_responder_rsp_nla_failed;
+
+	if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_TYPE,
+	   rsp->status))
+		goto ndp_responder_rsp_nla_failed;
+
+	if (nla_put_u32(vendor_event,
+	   QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE,
+	   rsp->reason))
+		goto ndp_responder_rsp_nla_failed;
+
+	cfg80211_vendor_event(vendor_event, GFP_KERNEL);
+	EXIT();
 	return;
+ndp_responder_rsp_nla_failed:
+	hddLog(LOGE, FL("nla_put api failed"));
+	kfree_skb(vendor_event);
+	EXIT();
 }
 
 /**

+ 2 - 0
core/mac/inc/sir_api.h

@@ -6094,6 +6094,7 @@ struct ndp_responder_req {
  * @vdev_id: session id of the interface over which ndp is being created
  * @status: command status
  * @reason: reason for failure if any
+ * @peer_mac_addr: Peer's mac address
  *
  */
 struct ndp_responder_rsp_event {
@@ -6101,6 +6102,7 @@ struct ndp_responder_rsp_event {
 	uint32_t vdev_id;
 	uint32_t status;
 	uint32_t reason;
+	struct qdf_mac_addr peer_mac_addr;
 };
 
 /**

+ 1 - 0
core/mac/src/pe/lim/lim_process_message_queue.c

@@ -1453,6 +1453,7 @@ void lim_process_messages(tpAniSirGlobal mac_ctx, tpSirMsgQ msg)
 	case eWNI_SME_REGISTER_MGMT_FRAME_CB:
 	case eWNI_SME_EXT_CHANGE_CHANNEL:
 	case eWNI_SME_NDP_INITIATOR_REQ:
+	case eWNI_SME_NDP_RESPONDER_REQ:
 		/* These messages are from HDD.No need to respond to HDD */
 		lim_process_normal_hdd_msg(mac_ctx, msg, false);
 		break;

+ 1 - 0
core/mac/src/pe/lim/lim_process_sme_req_messages.c

@@ -5171,6 +5171,7 @@ bool lim_process_sme_req_messages(tpAniSirGlobal pMac, tpSirMsgQ pMsg)
 	case eWNI_SME_PDEV_SET_HT_VHT_IE:
 		lim_process_set_pdev_IEs(pMac, pMsgBuf);
 	case eWNI_SME_NDP_INITIATOR_REQ:
+	case eWNI_SME_NDP_RESPONDER_REQ:
 		lim_handle_ndp_request_message(pMac, pMsg);
 		break;
 	default:

+ 180 - 50
core/mac/src/pe/nan/nan_datapath.c

@@ -33,15 +33,15 @@
 #include "wma_nan_datapath.h"
 
 /**
- * lim_send_ndp_event_to_sme() - generic function to pepare and send NDP message
- * to SME directly.
- * @mac_ctx: handle to mac structure
+ * lim_send_ndp_event_to_sme() - generic function to prepare and send NDP
+ * message to SME.
+ * @mac_ctx: handle to mac context structure
  * @msg_type: sme message type to send
  * @body_ptr: buffer
  * @len: buffer length
  * @body_val: value
  *
- * Return: Nothing
+ * Return: None
  */
 static void lim_send_ndp_event_to_sme(tpAniSirGlobal mac_ctx, uint32_t msg_type,
 				void *body_ptr, uint32_t len, uint32_t body_val)
@@ -62,6 +62,61 @@ static void lim_send_ndp_event_to_sme(tpAniSirGlobal mac_ctx, uint32_t msg_type,
 	lim_sys_process_mmh_msg_api(mac_ctx, &mmh_msg, ePROT);
 }
 
+/**
+ * lim_add_ndi_peer() - Function to add ndi peer
+ * @mac_ctx: handle to mac structure
+ * @vdev_id: vdev id on which peer is added
+ * @peer_mac_addr: peer to be added
+ *
+ * Return: QDF_STATUS_SUCCESS on success; error number otherwise
+ */
+static QDF_STATUS lim_add_ndi_peer(tpAniSirGlobal mac_ctx,
+	uint32_t vdev_id, struct qdf_mac_addr peer_mac_addr)
+{
+	tpPESession session;
+	tpDphHashNode sta_ds;
+	uint16_t assoc_id, peer_idx;
+	tSirRetStatus status;
+
+	session = pe_find_session_by_sme_session_id(mac_ctx,
+						vdev_id);
+	if (session == NULL) {
+		/* couldn't find session */
+		return QDF_STATUS_E_FAILURE;
+	}
+	sta_ds = dph_lookup_hash_entry(mac_ctx,
+				peer_mac_addr.bytes,
+				&assoc_id, &session->dph.dphHashTable);
+	/* peer exists, don't do anything */
+	if (sta_ds != NULL) {
+		lim_log(mac_ctx, LOGE, FL("NDI Peer already exists!!"));
+		return QDF_STATUS_SUCCESS;
+	}
+	lim_log(mac_ctx, LOG1,
+		FL("Need to create NDI Peer :" MAC_ADDRESS_STR),
+		MAC_ADDR_ARRAY(peer_mac_addr.bytes));
+	peer_idx = lim_assign_peer_idx(mac_ctx, session);
+	sta_ds = dph_add_hash_entry(mac_ctx, peer_mac_addr.bytes, peer_idx,
+			&session->dph.dphHashTable);
+	if (sta_ds == NULL) {
+		lim_log(mac_ctx, LOGE,
+			FL("Couldn't add dph entry"));
+		/* couldn't add dph entry */
+		return QDF_STATUS_E_FAILURE;
+	}
+	/* wma decides NDI mode from wma->interface struct */
+	sta_ds->staType = STA_ENTRY_NDI_PEER;
+	status = lim_add_sta(mac_ctx, sta_ds, false, session);
+	if (eSIR_SUCCESS != status) {
+		/* couldn't add peer */
+		lim_log(mac_ctx, LOGE,
+			FL("limAddSta failed status: %d"),
+			status);
+		return QDF_STATUS_E_FAILURE;
+	}
+	return QDF_STATUS_SUCCESS;
+}
+
 /**
  * lim_handle_ndp_indication_event() - Function to handle SIR_HAL_NDP_INDICATION
  * event from WMA
@@ -73,10 +128,7 @@ static void lim_send_ndp_event_to_sme(tpAniSirGlobal mac_ctx, uint32_t msg_type,
 static QDF_STATUS lim_handle_ndp_indication_event(tpAniSirGlobal mac_ctx,
 					struct ndp_indication_event *ndp_ind)
 {
-	tpPESession session;
-	tpDphHashNode sta_ds;
-	uint16_t assoc_id, peer_idx;
-	tSirRetStatus status;
+	QDF_STATUS status;
 
 	lim_log(mac_ctx, LOG1,
 		FL("role: %d, vdev: %d, peer_mac_addr "MAC_ADDRESS_STR),
@@ -84,47 +136,28 @@ static QDF_STATUS lim_handle_ndp_indication_event(tpAniSirGlobal mac_ctx,
 		MAC_ADDR_ARRAY(ndp_ind->peer_mac_addr.bytes));
 
 	if (ndp_ind->role == NDP_ROLE_INITIATOR) {
+		/* Free config only for INITIATOR role */
+		qdf_mem_free(ndp_ind->ndp_config.ndp_cfg);
+		qdf_mem_free(ndp_ind->ndp_info.ndp_app_info);
 
-		session = pe_find_session_by_sme_session_id(mac_ctx,
-							    ndp_ind->vdev_id);
-		if (session == NULL) {
-			lim_log(mac_ctx, LOGE,
-				FL("Couldn't find session, vdev_id: %d, ndp_role: %d"),
-				ndp_ind->vdev_id, ndp_ind->role);
-			goto ndp_indication_failed;
-		}
-		sta_ds = dph_lookup_hash_entry(mac_ctx,
-					ndp_ind->peer_mac_addr.bytes,
-					&assoc_id, &session->dph.dphHashTable);
-		/* peer exists, don't do anything */
-		if (sta_ds != NULL) {
-			lim_log(mac_ctx, LOGE, FL("NDI Peer already exists!!"));
-			return QDF_STATUS_SUCCESS;
-		}
-
-		/* else create one */
-		lim_log(mac_ctx, LOG1, FL("Need to create NDI Peer!!"));
-		peer_idx = lim_assign_peer_idx(mac_ctx, session);
-		sta_ds = dph_add_hash_entry(mac_ctx,
-					ndp_ind->peer_mac_addr.bytes,
-					peer_idx, &session->dph.dphHashTable);
-		if (sta_ds == NULL) {
+		status = lim_add_ndi_peer(mac_ctx, ndp_ind->vdev_id,
+				ndp_ind->peer_mac_addr);
+		if (QDF_STATUS_SUCCESS != status) {
 			lim_log(mac_ctx, LOGE,
-				FL("Couldn't add dph entry, ndp_role: %d"),
+				FL("Couldn't add ndi peer, ndp_role: %d"),
 				ndp_ind->role);
 			goto ndp_indication_failed;
 		}
-		/* wma decides NDI mode from wma->inferface struct */
-		sta_ds->staType = STA_ENTRY_NDI_PEER;
-		status = lim_add_sta(mac_ctx, sta_ds, false, session);
-		if (eSIR_SUCCESS != status) {
-			lim_log(mac_ctx, LOGE,
-			       FL("limAddSta failed status: %d, ndp_role: %d"),
-				status, ndp_ind->role);
-			goto ndp_indication_failed;
+	} else if (NDP_ROLE_RESPONDER == ndp_ind->role) {
+		/*
+		 * For RESPONDER role ndp_cfg and app_info sent till HDD
+		 * will be freed in sme.
+		 */
+		if (NDP_ACCEPT_POLICY_ALL != ndp_ind->policy) {
+			lim_send_ndp_event_to_sme(mac_ctx,
+				eWNI_SME_NDP_INDICATION,
+				ndp_ind, sizeof(*ndp_ind), 0);
 		}
-	} else {
-		/* Processing for NDP Data Reponder role */
 	}
 	/*
 	 * With NDP indication if peer does not exists already add_sta is
@@ -140,7 +173,47 @@ ndp_indication_failed:
 }
 
 /**
- * lim_handle_ndp_event_message() - Handler for NDP events from WMA
+ * lim_ndp_responder_rsp_handler() - Handler for NDP responder rsp
+ * @mac_ctx: handle to mac structure
+ * @rsp_ind: pointer to NDP responder response event
+ * @bodyval: value
+ *
+ * Return: QDF_STATUS_SUCCESS on success; error number otherwise
+ */
+static QDF_STATUS lim_ndp_responder_rsp_handler(tpAniSirGlobal mac_ctx,
+	struct ndp_responder_rsp_event *rsp_ind, uint32_t bodyval)
+{
+	QDF_STATUS ret_val = QDF_STATUS_SUCCESS;
+
+	if ((NULL == rsp_ind) || bodyval) {
+		lim_log(mac_ctx, LOGE,
+			FL("rsp_ind is NULL or bodyval %d"), bodyval);
+		/* msg to unblock SME, but not send rsp to HDD */
+		bodyval = true;
+		ret_val = QDF_STATUS_E_INVAL;
+		goto responder_rsp;
+	}
+
+	if (QDF_STATUS_SUCCESS == rsp_ind->status) {
+		ret_val = lim_add_ndi_peer(mac_ctx, rsp_ind->vdev_id,
+				rsp_ind->peer_mac_addr);
+		if (QDF_STATUS_SUCCESS != ret_val) {
+			lim_log(mac_ctx, LOGE,
+				FL("Couldn't add ndi peer"));
+			rsp_ind->status = QDF_STATUS_E_FAILURE;
+		}
+	}
+
+responder_rsp:
+	/* send eWNI_SME_NDP_RESPONDER_RSP */
+	lim_send_ndp_event_to_sme(mac_ctx, eWNI_SME_NDP_RESPONDER_RSP,
+				bodyval ? NULL : rsp_ind,
+				bodyval ? 0 : sizeof(*rsp_ind), bodyval);
+	return ret_val;
+}
+
+/**
+ * lim_handle_ndp_event_message() - Handler for NDP events/RSP from WMA
  * @mac_ctx: handle to mac structure
  * @msg: pointer to message
  *
@@ -168,6 +241,10 @@ QDF_STATUS lim_handle_ndp_event_message(tpAniSirGlobal mac_ctx, cds_msg_t *msg)
 		qdf_mem_free(ndp_ind->ndp_info.ndp_app_info);
 		break;
 	}
+	case SIR_HAL_NDP_RESPONDER_RSP:
+		status = lim_ndp_responder_rsp_handler(mac_ctx, msg->bodyptr,
+					msg->bodyval);
+		break;
 	default:
 		lim_log(mac_ctx, LOGE,
 			FL("Unhandled NDP event: %d"), msg->type);
@@ -232,12 +309,61 @@ send_initiator_rsp:
 }
 
 /**
-* lim_handle_ndp_request_message() - Handler for NDP req from SME
-* @mac_ctx: handle to mac structure
-* @msg: pointer to message
-*
-* Return: QDF_STATUS_SUCCESS on success; error number otherwise
-*/
+ * lim_process_sme_ndp_responder_req() - Handler for NDP responder req
+ * @mac_ctx: handle to mac structure
+ * @lim_msg: pointer to SME NDP responder request message
+ *
+ * Return: QDF_STATUS_SUCCESS on success or failure code in case of failure
+ */
+static QDF_STATUS lim_process_sme_ndp_responder_req(tpAniSirGlobal mac_ctx,
+	struct sir_sme_ndp_responder_req *lim_msg)
+{
+	tSirMsgQ msg;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct ndp_responder_req *responder_req;
+
+	if (NULL == lim_msg) {
+		lim_log(mac_ctx, LOGE, FL("ndp_msg is NULL"));
+		status = QDF_STATUS_E_INVAL;
+		goto send_failure_rsp;
+	}
+	responder_req = qdf_mem_malloc(sizeof(*responder_req));
+	if (NULL == responder_req) {
+		lim_log(mac_ctx, LOGE,
+			FL("Unable to allocate memory for responder_req"));
+		status = QDF_STATUS_E_NOMEM;
+		goto send_failure_rsp;
+	}
+	qdf_mem_copy(responder_req, &lim_msg->req, sizeof(*responder_req));
+	msg.type = SIR_HAL_NDP_RESPONDER_REQ;
+	msg.reserved = 0;
+	msg.bodyptr = responder_req;
+	msg.bodyval = 0;
+
+	lim_log(mac_ctx, LOG1, FL("sending SIR_HAL_NDP_RESPONDER_REQ to WMA"));
+	MTRACE(mac_trace_msg_tx(mac_ctx, NO_SESSION, msg.type));
+
+	if (eSIR_SUCCESS != wma_post_ctrl_msg(mac_ctx, &msg)) {
+		lim_log(mac_ctx, LOGP, FL("wma_post_ctrl_msg failed"));
+		status = QDF_STATUS_E_FAILURE;
+		qdf_mem_free(responder_req);
+		goto send_failure_rsp;
+	}
+	return status;
+send_failure_rsp:
+	/* msg to unblock SME, but not send rsp to HDD */
+	lim_send_ndp_event_to_sme(mac_ctx, eWNI_SME_NDP_RESPONDER_RSP,
+				NULL, 0, true);
+	return status;
+}
+
+/**
+ * lim_handle_ndp_request_message() - Handler for NDP req from SME
+ * @mac_ctx: handle to mac structure
+ * @msg: pointer to message
+ *
+ * Return: QDF_STATUS_SUCCESS on success; error number otherwise
+ */
 QDF_STATUS lim_handle_ndp_request_message(tpAniSirGlobal mac_ctx,
 					  tpSirMsgQ msg)
 {
@@ -248,6 +374,10 @@ QDF_STATUS lim_handle_ndp_request_message(tpAniSirGlobal mac_ctx,
 		status = lim_process_sme_ndp_initiator_req(mac_ctx,
 							   msg->bodyptr);
 		break;
+	case eWNI_SME_NDP_RESPONDER_REQ:
+		status = lim_process_sme_ndp_responder_req(mac_ctx,
+							 msg->bodyptr);
+		break;
 	default:
 		lim_log(mac_ctx, LOGE, FL("Unhandled NDP request: %d"),
 		       msg->type);

+ 1 - 0
core/sme/inc/sme_inside.h

@@ -194,6 +194,7 @@ typedef struct tagSmeCmd {
 		struct sir_antenna_mode_param set_antenna_mode_cmd;
 #ifdef WLAN_FEATURE_NAN_DATAPATH
 		struct ndp_initiator_req initiator_req;
+		struct ndp_responder_req responder_req;
 #endif
 	} u;
 } tSmeCmd;

+ 1 - 0
core/sme/inc/sme_internal.h

@@ -94,6 +94,7 @@ typedef enum eSmeCommandType {
 	e_sme_command_set_dual_mac_config,
 	e_sme_command_set_antenna_mode,
 	eSmeCommandNdpInitiatorRequest,
+	eSmeCommandNdpResponderRequest,
 } eSmeCommandType;
 
 typedef enum eSmeState {

+ 27 - 4
core/sme/inc/sme_nan_datapath.h

@@ -27,6 +27,8 @@
 #ifndef __SME_NAN_DATAPATH_H
 #define __SME_NAN_DATAPATH_H
 
+#include "csr_inside_api.h"
+
 #ifdef WLAN_FEATURE_NAN_DATAPATH
 #include "qdf_types.h"
 #include "sir_api.h"
@@ -46,12 +48,26 @@ struct sir_sme_ndp_initiator_req {
 	struct ndp_initiator_req req;
 };
 
-/* NAN initiator request handler */
+/**
+ * struct sir_sme_ndp_responder_req - Wrapper of responder's response
+ * to ndp create request
+ * @msg_type: SME msg type
+ * @msg_len: Length of msg
+ * @req: responder's response to ndp create request
+ *
+ */
+struct sir_sme_ndp_responder_req {
+	uint16_t msg_type;
+	uint16_t msg_len;
+	struct ndp_responder_req req;
+};
+
+/* NAN initiator request handler */
 QDF_STATUS sme_ndp_initiator_req_handler(tHalHandle hal,
 					struct ndp_initiator_req *req_params);
 
-/* NAN responder request handler */
-QDF_STATUS sme_ndp_responder_req_handler(uint32_t session_id,
+/* NAN responder request handler */
+QDF_STATUS sme_ndp_responder_req_handler(tHalHandle hal,
 					struct ndp_responder_req *req_params);
 
 /* NAN indication response handler */
@@ -92,6 +108,8 @@ QDF_STATUS csr_process_ndp_initiator_request(tpAniSirGlobal mac_ctx,
 
 void sme_ndp_msg_processor(tpAniSirGlobal mac_ctx, cds_msg_t *msg);
 
+QDF_STATUS csr_process_ndp_responder_request(tpAniSirGlobal mac_ctx,
+							tSmeCmd *cmd);
 #else
 
 /* Start NDI BSS */
@@ -135,7 +153,7 @@ QDF_STATUS sme_ndp_end_req_handler(uint32_t session_id,
 QDF_STATUS sme_ndp_sched_req_handler(uint32_t session_id,
 				struct ndp_schedule_update_req *req_params);
 
-static inline eHalStatus csr_process_ndp_initiator_request(
+static inline QDF_STATUS csr_process_ndp_initiator_request(
 				tpAniSirGlobal mac_ctx, tSmeCmd *cmd)
 {
 	return QDF_STATUS_SUCCESS;
@@ -145,5 +163,10 @@ static inline void sme_ndp_msg_processor(tpAniSirGlobal mac_ctx, cds_msg_t *msg)
 {
 }
 
+static inline QDF_STATUS csr_process_ndp_responder_request(
+			tpAniSirGlobal mac_ctx, tSmeCmd *cmd)
+{
+	return QDF_STATUS_SUCCESS;
+}
 #endif /* WLAN_FEATURE_NAN_DATAPATH */
 #endif /* __SME_NAN_DATAPATH_H */

+ 12 - 1
core/sme/src/common/sme_api.c

@@ -965,7 +965,16 @@ sme_process_cmd:
 				&pMac->sme.smeCmdActiveList,
 				&pCommand->Link, LL_ACCESS_LOCK))
 				csr_release_command(pMac, pCommand);
-	    break;
+		break;
+	case eSmeCommandNdpResponderRequest:
+		csr_ll_unlock(&pMac->sme.smeCmdActiveList);
+		status = csr_process_ndp_responder_request(pMac, pCommand);
+		if (status != QDF_STATUS_SUCCESS) {
+			if (csr_ll_remove_entry(&pMac->sme.smeCmdActiveList,
+					     &pCommand->Link, LL_ACCESS_LOCK))
+				csr_release_command(pMac, pCommand);
+		}
+		break;
 	case eSmeCommandDelStaSession:
 		csr_ll_unlock(&pMac->sme.smeCmdActiveList);
 		csr_process_del_sta_session_command(pMac, pCommand);
@@ -2998,6 +3007,8 @@ QDF_STATUS sme_process_msg(tHalHandle hHal, cds_msg_t *pMsg)
 	case eWNI_SME_NDP_CONFIRM_IND:
 	case eWNI_SME_NDP_NEW_PEER_IND:
 	case eWNI_SME_NDP_INITIATOR_RSP:
+	case eWNI_SME_NDP_INDICATION:
+	case eWNI_SME_NDP_RESPONDER_RSP:
 		sme_ndp_msg_processor(pMac, pMsg);
 		break;
 	default:

+ 209 - 16
core/sme/src/nan/nan_datapath_api.c

@@ -25,7 +25,6 @@
  */
 #include "sms_debug.h"
 #include "sme_api.h"
-#include "csr_inside_api.h"
 #include "sme_inside.h"
 #include "csr_internal.h"
 #include "sme_nan_datapath.h"
@@ -111,15 +110,85 @@ QDF_STATUS sme_ndp_initiator_req_handler(tHalHandle hal,
 }
 /**
  * sme_ndp_responder_req_handler() - ndp responder request handler
- * @session_id: session id over which the ndp is being created
+ * @hal: hal handle
  * @req_params: request parameters
  *
  * Return: QDF_STATUS_SUCCESS on success; error number otherwise
  */
-QDF_STATUS sme_ndp_responder_req_handler(uint32_t session_id,
+QDF_STATUS sme_ndp_responder_req_handler(tHalHandle hal,
 	struct ndp_responder_req *req_params)
 {
-	return QDF_STATUS_SUCCESS;
+	QDF_STATUS status;
+	tSmeCmd *cmd;
+	tpAniSirGlobal mac_ctx = PMAC_STRUCT(hal);
+
+	if (NULL == req_params) {
+		sms_log(mac_ctx, LOGE, FL("Invalid req_params"));
+		return QDF_STATUS_E_INVAL;
+	}
+
+	status = sme_acquire_global_lock(&mac_ctx->sme);
+	if (QDF_STATUS_SUCCESS != status) {
+		sms_log(mac_ctx, LOGE,
+			FL("SME lock failed, status:%d"), status);
+		return status;
+	}
+	cmd = csr_get_command_buffer(mac_ctx);
+	if (NULL == cmd) {
+		sme_release_global_lock(&mac_ctx->sme);
+		return QDF_STATUS_E_RESOURCES;
+	}
+
+	cmd->command = eSmeCommandNdpResponderRequest;
+	cmd->sessionId = (uint8_t)req_params->vdev_id;
+	qdf_mem_copy(&cmd->u.responder_req, req_params,
+			sizeof(*req_params));
+
+	/*
+	 * Pointers copied as part of above operation are
+	 * to be overwritten
+	 */
+	cmd->u.responder_req.ndp_info.ndp_app_info = NULL;
+	cmd->u.responder_req.ndp_config.ndp_cfg = NULL;
+
+	if (req_params->ndp_info.ndp_app_info_len) {
+		cmd->u.responder_req.ndp_info.ndp_app_info =
+			qdf_mem_malloc(req_params->ndp_info.ndp_app_info_len);
+		if (NULL == cmd->u.responder_req.ndp_info.ndp_app_info) {
+			sme_release_global_lock(&mac_ctx->sme);
+			return QDF_STATUS_E_NOMEM;
+		}
+		qdf_mem_copy(cmd->u.responder_req.ndp_info.ndp_app_info,
+			req_params->ndp_info.ndp_app_info,
+			req_params->ndp_info.ndp_app_info_len);
+	}
+
+	if (req_params->ndp_config.ndp_cfg_len) {
+		cmd->u.responder_req.ndp_config.ndp_cfg =
+			qdf_mem_malloc(req_params->ndp_config.ndp_cfg_len);
+		if (NULL == cmd->u.responder_req.ndp_config.ndp_cfg) {
+			sme_release_global_lock(&mac_ctx->sme);
+			qdf_mem_free(
+				cmd->u.responder_req.ndp_info.ndp_app_info);
+			cmd->u.responder_req.ndp_info.ndp_app_info_len = 0;
+			return QDF_STATUS_E_NOMEM;
+		}
+		qdf_mem_copy(cmd->u.responder_req.ndp_config.ndp_cfg,
+			req_params->ndp_config.ndp_cfg,
+			req_params->ndp_config.ndp_cfg_len);
+	}
+
+	status = csr_queue_sme_command(mac_ctx, cmd, true);
+	if (QDF_STATUS_SUCCESS != status) {
+		sms_log(mac_ctx, LOGE,
+			FL("SME enqueue failed, status:%d"), status);
+		qdf_mem_free(cmd->u.responder_req.ndp_info.ndp_app_info);
+		qdf_mem_free(cmd->u.responder_req.ndp_config.ndp_cfg);
+		cmd->u.responder_req.ndp_info.ndp_app_info_len = 0;
+		cmd->u.responder_req.ndp_config.ndp_cfg_len = 0;
+	}
+	sme_release_global_lock(&mac_ctx->sme);
+	return status;
 }
 
 /**
@@ -357,6 +426,69 @@ QDF_STATUS csr_process_ndp_initiator_request(tpAniSirGlobal mac_ctx,
 	return cds_send_mb_message_to_mac(lim_msg);
 }
 
+/**
+ * csr_process_ndp_responder_request() - ndp responder req
+ * @mac_ctx: Global MAC context
+ * @cmd: Cmd sent to SME
+ *
+ * Return: Success or failure code
+ */
+QDF_STATUS csr_process_ndp_responder_request(tpAniSirGlobal mac_ctx,
+							tSmeCmd *cmd)
+{
+	struct sir_sme_ndp_responder_req *lim_msg;
+	uint16_t msg_len;
+	QDF_STATUS status;
+
+	if (!cmd) {
+		sms_log(mac_ctx, LOGE, FL("Invalid req_params"));
+		return QDF_STATUS_E_INVAL;
+	}
+
+	msg_len  = sizeof(*lim_msg);
+	lim_msg = qdf_mem_malloc(msg_len);
+	if (!lim_msg) {
+		sms_log(mac_ctx, LOGE, FL("Mem alloc fail"));
+		status = QDF_STATUS_E_NOMEM;
+		goto free_config;
+	}
+
+	qdf_mem_set(lim_msg, msg_len, 0);
+	lim_msg->msg_type = eWNI_SME_NDP_RESPONDER_REQ;
+	lim_msg->msg_len = msg_len;
+	/*
+	 * following is being copied from p_cmd->u.responder_req,
+	 * no need to perform deep copy, as we are going to use memory
+	 * allocated at SME in p_cmd->u.responder_req and pass it all the way
+	 * to WMA.
+	 */
+	qdf_mem_copy(&lim_msg->req, &cmd->u.responder_req,
+			sizeof(struct ndp_responder_req));
+
+	sms_log(mac_ctx, LOG1,
+		FL("vdev_id %d ndp_rsp = %d Instance id %d"),
+		lim_msg->req.vdev_id,
+		lim_msg->req.ndp_rsp,
+		lim_msg->req.ndp_instance_id);
+
+	status = cds_send_mb_message_to_mac(lim_msg);
+
+free_config:
+	if (status != QDF_STATUS_SUCCESS) {
+		/*
+		 * If fail, free up the ndp_cfg and ndp_app_info
+		 * allocated in sme.
+		 */
+		qdf_mem_free(cmd->u.responder_req.ndp_info.ndp_app_info);
+		qdf_mem_free(cmd->u.responder_req.ndp_config.ndp_cfg);
+		cmd->u.responder_req.ndp_info.ndp_app_info_len = 0;
+		cmd->u.responder_req.ndp_config.ndp_cfg_len = 0;
+		cmd->u.responder_req.ndp_config.ndp_cfg = NULL;
+		cmd->u.responder_req.ndp_info.ndp_app_info = NULL;
+	}
+	return status;
+}
+
 /**
  * sme_ndp_msg_processor() - message processor for ndp/ndi north-bound SME msg.
  * @mac_ctx: Global MAC context
@@ -401,17 +533,6 @@ void sme_ndp_msg_processor(tpAniSirGlobal mac_ctx, cds_msg_t *msg)
 		}
 		release_active_cmd = true;
 		cmd_to_rel = eSmeCommandNdpInitiatorRequest;
-		entry = csr_ll_peek_head(&mac_ctx->sme.smeCmdActiveList,
-				      LL_ACCESS_LOCK);
-		if (entry != NULL) {
-			cmd = GET_BASE_ADDR(entry, tSmeCmd, Link);
-			if (cmd_to_rel == cmd->command) {
-				qdf_mem_free(
-				    cmd->u.initiator_req.ndp_config.ndp_cfg);
-				qdf_mem_free(
-				    cmd->u.initiator_req.ndp_info.ndp_app_info);
-			}
-		}
 		break;
 	}
 	case eWNI_SME_NDP_NEW_PEER_IND: {
@@ -423,15 +544,87 @@ void sme_ndp_msg_processor(tpAniSirGlobal mac_ctx, cds_msg_t *msg)
 		session_id = roam_info.ndp.ndp_peer_ind_params.session_id;
 		break;
 	}
+	case eWNI_SME_NDP_INDICATION:
+		result = eCSR_ROAM_RESULT_NDP_INDICATION;
+		/* copy msg from msg body to roam info passed to callback */
+		qdf_mem_copy(&roam_info.ndp.ndp_indication_params,
+			msg->bodyptr, sizeof(struct ndp_indication_event));
+		session_id = roam_info.ndp.ndp_indication_params.vdev_id;
+		break;
+	case eWNI_SME_NDP_RESPONDER_RSP:
+		if (true == msg->bodyval) {
+			/* rsp was locally generated, do not send to HDD */
+			send_to_user = false;
+		} else {
+			result = eCSR_ROAM_RESULT_NDP_RESPONDER_RSP;
+			/*
+			 * Copy msg from msg body to roam info passed to
+			 * callback
+			 */
+			qdf_mem_copy(&roam_info.ndp.ndp_responder_rsp_params,
+				msg->bodyptr,
+				sizeof(struct ndp_responder_rsp_event));
+			session_id =
+				roam_info.ndp.ndp_responder_rsp_params.vdev_id;
+		}
+		release_active_cmd = true;
+		cmd_to_rel = eSmeCommandNdpResponderRequest;
+		break;
 	default:
 		sms_log(mac_ctx, LOGE, FL("Unhandled NDP rsp"));
 		qdf_mem_free(msg->bodyptr);
 		return;
 	}
 
-	if (true == send_to_user) {
+	if (true == send_to_user)
 		csr_roam_call_callback(mac_ctx, session_id, &roam_info, 0,
 				    eCSR_ROAM_NDP_STATUS_UPDATE, result);
+
+	/*
+	 * free ndp_cfg and ndp_app_info if required
+	 * For some commands this info may be needed in HDD
+	 * so free them after roam callback.
+	 */
+	switch (msg->type) {
+	case eWNI_SME_NDP_INITIATOR_RSP:
+		entry = csr_ll_peek_head(&mac_ctx->sme.smeCmdActiveList,
+				LL_ACCESS_LOCK);
+		if (entry != NULL) {
+			cmd = GET_BASE_ADDR(entry, tSmeCmd, Link);
+			if (eSmeCommandNdpInitiatorRequest == cmd->command) {
+				qdf_mem_free(
+					cmd->u.initiator_req.
+					ndp_config.ndp_cfg);
+				qdf_mem_free(
+					cmd->u.initiator_req.
+					ndp_info.ndp_app_info);
+			}
+		}
+		break;
+	case eWNI_SME_NDP_RESPONDER_RSP:
+		entry = csr_ll_peek_head(&mac_ctx->sme.smeCmdActiveList,
+					LL_ACCESS_LOCK);
+		if (entry != NULL) {
+			cmd = GET_BASE_ADDR(entry, tSmeCmd, Link);
+			if (eSmeCommandNdpResponderRequest == cmd->command) {
+				qdf_mem_free(
+					cmd->u.responder_req.
+					ndp_config.ndp_cfg);
+				qdf_mem_free(
+					cmd->u.responder_req.
+					ndp_info.ndp_app_info);
+			}
+		}
+		break;
+	case eWNI_SME_NDP_INDICATION:
+		qdf_mem_free(
+			roam_info.ndp.ndp_indication_params.ndp_config.ndp_cfg);
+		qdf_mem_free(
+			roam_info.ndp.ndp_indication_params.
+			ndp_info.ndp_app_info);
+		break;
+	default:
+		break;
 	}
 	qdf_mem_free(msg->bodyptr);
 	if (release_active_cmd == false)

+ 4 - 1
core/wma/src/wma_main.c

@@ -5950,7 +5950,10 @@ QDF_STATUS wma_mc_process_msg(void *cds_context, cds_msg_t *msg)
 		wma_handle_ndp_initiator_req(wma_handle, msg->bodyptr);
 		qdf_mem_free(msg->bodyptr);
 		break;
-
+	case SIR_HAL_NDP_RESPONDER_REQ:
+		wma_handle_ndp_responder_req(wma_handle, msg->bodyptr);
+		qdf_mem_free(msg->bodyptr);
+		break;
 	default:
 		WMA_LOGD("unknow msg type %x", msg->type);
 		/* Do Nothing? MSG Body should be freed at here */

+ 126 - 7
core/wma/src/wma_nan_datapath.c

@@ -74,8 +74,8 @@ QDF_STATUS wma_handle_ndp_initiator_req(tp_wma_handle wma_handle, void *req)
 	 * WMI command expects 4 byte alligned len:
 	 * round up ndp_cfg_len and ndp_app_info_len to 4 bytes
 	 */
-	ndp_cfg_len = roundup(ndp_req->ndp_config.ndp_cfg_len, 4);
-	ndp_app_info_len = roundup(ndp_req->ndp_info.ndp_app_info_len, 4);
+	ndp_cfg_len = qdf_roundup(ndp_req->ndp_config.ndp_cfg_len, 4);
+	ndp_app_info_len = qdf_roundup(ndp_req->ndp_info.ndp_app_info_len, 4);
 	/* allocated memory for fixed params as well as variable size data */
 	len = sizeof(*cmd) + ndp_cfg_len + ndp_app_info_len +
 		(2 * WMI_TLV_HDR_SIZE) + sizeof(*ch_tlv);
@@ -174,7 +174,110 @@ send_ndi_initiator_fail:
 QDF_STATUS wma_handle_ndp_responder_req(tp_wma_handle wma_handle,
 					struct ndp_responder_req *req_params)
 {
+	wmi_buf_t buf;
+	ol_txrx_vdev_handle vdev;
+	uint32_t vdev_id = 0, ndp_cfg_len, ndp_app_info_len;
+	uint8_t *cfg_info, *app_info;
+	int ret;
+	wmi_ndp_responder_req_fixed_param *cmd;
+	uint16_t len;
+	struct ndp_responder_rsp_event rsp = {0};
+	cds_msg_t pe_msg = {0};
+
+	if (NULL == req_params) {
+		WMA_LOGE(FL("Invalid req_params."));
+		return QDF_STATUS_E_INVAL;
+	}
+
+	vdev_id = req_params->vdev_id;
+	WMA_LOGD(FL("vdev_id: %d, transaction_id: %d, ndp_rsp %d, ndp_instance_id: %d, ndp_app_info_len: %d"),
+			req_params->vdev_id, req_params->transaction_id,
+			req_params->ndp_rsp,
+			req_params->ndp_instance_id,
+			req_params->ndp_info.ndp_app_info_len);
+	vdev = wma_find_vdev_by_id(wma_handle, vdev_id);
+	if (!vdev) {
+		WMA_LOGE(FL("vdev not found for vdev id %d."), vdev_id);
+		goto send_ndi_responder_fail;
+	}
+
+	if (!WMA_IS_VDEV_IN_NDI_MODE(wma_handle->interfaces, vdev_id)) {
+		WMA_LOGE(FL("vdev :$%d, not in NDI mode"), vdev_id);
+		goto send_ndi_responder_fail;
+	}
+
+	/*
+	 * WMI command expects 4 byte alligned len:
+	 * round up ndp_cfg_len and ndp_app_info_len to 4 bytes
+	 */
+	ndp_cfg_len = qdf_roundup(req_params->ndp_config.ndp_cfg_len, 4);
+	ndp_app_info_len =
+		qdf_roundup(req_params->ndp_info.ndp_app_info_len, 4);
+	/* allocated memory for fixed params as well as variable size data */
+	len = sizeof(*cmd) + ndp_cfg_len + ndp_app_info_len +
+		(2 * WMI_TLV_HDR_SIZE);
+	buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
+	if (!buf) {
+		WMA_LOGE(FL("wmi_buf_alloc failed"));
+		goto send_ndi_responder_fail;
+	}
+	cmd = (wmi_ndp_responder_req_fixed_param *) wmi_buf_data(buf);
+	WMITLV_SET_HDR(&cmd->tlv_header,
+			WMITLV_TAG_STRUC_wmi_ndp_responder_req_fixed_param,
+			WMITLV_GET_STRUCT_TLVLEN(
+				wmi_ndp_responder_req_fixed_param));
+	cmd->vdev_id = req_params->vdev_id;
+	cmd->transaction_id = req_params->transaction_id;
+	cmd->ndp_instance_id = req_params->ndp_instance_id;
+	cmd->rsp_code = req_params->ndp_rsp;
+
+	cmd->ndp_cfg_len = req_params->ndp_config.ndp_cfg_len;
+	cmd->ndp_app_info_len = req_params->ndp_info.ndp_app_info_len;
+
+	cfg_info = (uint8_t *)&cmd[1];
+	/* WMI command expects 4 byte alligned len */
+	WMITLV_SET_HDR(cfg_info, WMITLV_TAG_ARRAY_BYTE, ndp_cfg_len);
+	qdf_mem_copy(&cfg_info[WMI_TLV_HDR_SIZE],
+		     req_params->ndp_config.ndp_cfg, cmd->ndp_cfg_len);
+
+	app_info = &cfg_info[WMI_TLV_HDR_SIZE + ndp_cfg_len];
+	/* WMI command expects 4 byte alligned len */
+	WMITLV_SET_HDR(app_info, WMITLV_TAG_ARRAY_BYTE, ndp_app_info_len);
+	qdf_mem_copy(&app_info[WMI_TLV_HDR_SIZE],
+		     req_params->ndp_info.ndp_app_info,
+		     req_params->ndp_info.ndp_app_info_len);
+
+	WMA_LOGD(FL("ndp_config len: %d"),
+		req_params->ndp_config.ndp_cfg_len);
+	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_DEBUG,
+			req_params->ndp_config.ndp_cfg,
+			req_params->ndp_config.ndp_cfg_len);
+
+	WMA_LOGD(FL("ndp_app_info len: %d"),
+		req_params->ndp_info.ndp_app_info_len);
+	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_DEBUG,
+			req_params->ndp_info.ndp_app_info,
+			req_params->ndp_info.ndp_app_info_len);
+
+	ret = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
+				   WMI_NDP_RESPONDER_REQ_CMDID);
+	if (ret < 0) {
+		WMA_LOGE(FL("WMI_NDP_RESPONDER_REQ_CMDID failed, ret: %d"),
+			ret);
+		wmi_buf_free(buf);
+		goto send_ndi_responder_fail;
+	}
 	return QDF_STATUS_SUCCESS;
+send_ndi_responder_fail:
+	qdf_mem_zero(&rsp, sizeof(rsp));
+	rsp.vdev_id = req_params->vdev_id;
+	rsp.transaction_id = req_params->transaction_id;
+	rsp.status = QDF_STATUS_E_FAILURE;
+
+	pe_msg.bodyptr = &rsp;
+	pe_msg.type = SIR_HAL_NDP_RESPONDER_RSP;
+	return wma_handle->pe_ndp_event_handler(wma_handle->mac_context,
+						&pe_msg);
 }
 
 /**
@@ -215,9 +318,9 @@ QDF_STATUS wma_handle_ndp_sched_update_req(tp_wma_handle wma_handle,
 static int wma_ndp_indication_event_handler(void *handle, uint8_t *event_info,
 					    uint32_t len)
 {
+	cds_msg_t pe_msg = {0};
 	WMI_NDP_INDICATION_EVENTID_param_tlvs *event;
 	wmi_ndp_indication_event_fixed_param *fixed_params;
-	cds_msg_t pe_msg = {0};
 	struct ndp_indication_event ind_event = {0};
 	tp_wma_handle wma_handle = handle;
 
@@ -276,14 +379,13 @@ static int wma_ndp_indication_event_handler(void *handle, uint8_t *event_info,
 			     event->ndp_app_info,
 			     ind_event.ndp_info.ndp_app_info_len);
 	}
+
 	pe_msg.type = SIR_HAL_NDP_INDICATION;
 	pe_msg.bodyptr = &ind_event;
-	pe_msg.bodyval = 0;
 	return wma_handle->pe_ndp_event_handler(wma_handle->mac_context,
 						&pe_msg);
 }
 
-
 /**
  * wma_ndp_responder_rsp_event_handler() - NDP responder response event handler
  * @handle: wma handle
@@ -296,7 +398,25 @@ static int wma_ndp_indication_event_handler(void *handle, uint8_t *event_info,
 static int wma_ndp_responder_rsp_event_handler(void *handle,
 					uint8_t *event_info, uint32_t len)
 {
-	return 0;
+	cds_msg_t pe_msg = {0};
+	tp_wma_handle wma_handle = handle;
+	WMI_NDP_RESPONDER_RSP_EVENTID_param_tlvs *event;
+	wmi_ndp_responder_rsp_event_fixed_param  *fixed_params;
+	struct ndp_responder_rsp_event rsp = {0};
+
+	event = (WMI_NDP_RESPONDER_RSP_EVENTID_param_tlvs *)event_info;
+	fixed_params = event->fixed_param;
+
+	rsp.vdev_id = fixed_params->vdev_id;
+	rsp.transaction_id = fixed_params->transaction_id;
+	rsp.reason = fixed_params->reason_code;
+	rsp.status = fixed_params->rsp_status;
+	WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_ndi_mac_addr,
+				rsp.peer_mac_addr.bytes);
+	pe_msg.bodyptr = &rsp;
+	pe_msg.type = SIR_HAL_NDP_RESPONDER_RSP;
+	return wma_handle->pe_ndp_event_handler(wma_handle->mac_context,
+						&pe_msg);
 }
 
 /**
@@ -370,7 +490,6 @@ static int wma_ndp_confirm_event_handler(void *handle, uint8_t *event_info,
 	}
 	msg.type = SIR_HAL_NDP_CONFIRM;
 	msg.bodyptr = &ndp_confirm;
-	msg.bodyval = 0;
 	return wma_handle->pe_ndp_event_handler(wma_handle->mac_context, &msg);
 }
 

+ 7 - 0
core/wma/src/wma_nan_datapath.h

@@ -46,6 +46,8 @@ static inline void wma_update_hdd_cfg_ndp(tp_wma_handle wma_handle,
 {
 	tgt_cfg->nan_datapath_enabled = wma_handle->nan_datapath_enabled;
 }
+QDF_STATUS wma_handle_ndp_responder_req(tp_wma_handle wma_handle,
+					struct ndp_responder_req *req_params);
 void wma_delete_all_nan_remote_peers(tp_wma_handle wma,
 					uint32_t vdev_id);
 
@@ -88,6 +90,11 @@ static inline QDF_STATUS wma_handle_ndp_initiator_req(tp_wma_handle wma_handle,
 {
 	return QDF_STATUS_SUCCESS;
 }
/* Stub for builds without WLAN_FEATURE_NAN_DATAPATH: accept and ignore */
static inline QDF_STATUS wma_handle_ndp_responder_req(tp_wma_handle wma_handle,
					void *req_params)
{
	return QDF_STATUS_SUCCESS;
}
 
 #endif /* WLAN_FEATURE_NAN_DATAPATH */
 #endif /* __WMA_NAN_DATAPATH_H */