Parcourir la source

qcacld-3.0: Add support for NAN Data End Request

This is qcacld-2.0 to qcacld-3.0 propagation.

Add implementation of NAN Data End request.

Change-Id: Iddd22a83b0763dab2e7398f4d2cf5b9ada58225a
CRs-Fixed: 962367
Naveen Rawat il y a 8 ans
Parent
commit
f28315cd66

+ 2 - 1
core/hdd/inc/wlan_hdd_assoc.h

@@ -55,7 +55,8 @@
 /* Timeout in ms for peer info request commpletion */
 #define IBSS_PEER_INFO_REQ_TIMOEUT 1000
 
-/* Type Declarations */
+#define INVALID_PEER_IDX -1
+
 /**
  * enum eConnectionState - connection state values at HDD
  * @eConnectionState_NotConnected: Not associated in Infra or participating in

+ 2 - 0
core/hdd/inc/wlan_hdd_tx_rx.h

@@ -106,6 +106,8 @@ static inline void hdd_get_tx_resource(hdd_adapter_t *adapter,
 }
 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
 
+int hdd_get_peer_idx(hdd_station_ctx_t *sta_ctx, struct qdf_mac_addr *addr);
+
 const char *hdd_reason_type_to_string(enum netif_reason_type reason);
 const char *hdd_action_type_to_string(enum netif_action_type action);
 void wlan_hdd_netif_queue_control(hdd_adapter_t *adapter,

+ 2 - 1
core/hdd/src/wlan_hdd_assoc.c

@@ -2568,6 +2568,8 @@ bool hdd_save_peer(hdd_station_ctx_t *sta_ctx, uint8_t sta_id,
 
 	for (idx = 0; idx < SIR_MAX_NUM_STA_IN_IBSS; idx++) {
 		if (0 == sta_ctx->conn_info.staId[idx]) {
+			hddLog(LOG1, FL("adding peer: %pM, sta_id: %d, at idx: %d"),
+			       peer_mac_addr, sta_id, idx);
 			sta_ctx->conn_info.staId[idx] = sta_id;
 			qdf_copy_macaddr(
 				&sta_ctx->conn_info.peerMacAddress[idx],
@@ -5449,7 +5451,6 @@ int iw_set_auth(struct net_device *dev, struct iw_request_info *info,
 
 	return ret;
 }
-
 /**
  * __iw_get_auth() -
  *	This function returns the auth type to the wpa_supplicant

+ 164 - 21
core/hdd/src/wlan_hdd_nan_datapath.c

@@ -44,8 +44,8 @@ qca_wlan_vendor_ndp_policy[QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_MAX + 1] = {
 	[QCA_WLAN_VENDOR_ATTR_NDP_SERVICE_INSTANCE_ID] = { .type = NLA_U16 },
 	[QCA_WLAN_VENDOR_ATTR_NDP_CHANNEL] = { .type = NLA_U32 },
 	[QCA_WLAN_VENDOR_ATTR_NDP_PEER_DISCOVERY_MAC_ADDR] = {
-						.type = NLA_BINARY,
-						.len = QDF_MAC_ADDR_SIZE },
+					.type = NLA_BINARY,
+					.len = QDF_MAC_ADDR_SIZE },
 	[QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_SECURITY] = { .type = NLA_U16 },
 	[QCA_WLAN_VENDOR_ATTR_NDP_CONFIG_QOS] = { .type = NLA_BINARY,
 					.len = NDP_QOS_INFO_LEN },
@@ -57,6 +57,8 @@ qca_wlan_vendor_ndp_policy[QCA_WLAN_VENDOR_ATTR_NDP_PARAMS_MAX + 1] = {
 	[QCA_WLAN_VENDOR_ATTR_NDP_SCHEDULE_STATUS_CODE] = { .type = NLA_U16 },
 	[QCA_WLAN_VENDOR_ATTR_NDP_NDI_MAC_ADDR] = { .type = NLA_BINARY,
 					.len = QDF_MAC_ADDR_SIZE },
+	[QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY] = { .type = NLA_BINARY,
+					.len = NDP_NUM_INSTANCE_ID },
 };
 
 /**
@@ -412,10 +414,10 @@ static int hdd_ndi_delete_req_handler(hdd_context_t *hdd_ctx,
 		return -EINVAL;
 	}
 
-	/* check if there are active NDP sessions on the adapter */
-	if (ndp_ctx->active_ndp_sessions > 0) {
-		hdd_err(FL("NDP sessions active %d, cannot delete NDI"),
-			ndp_ctx->active_ndp_sessions);
+	/* check if there are active peers on the adapter */
+	if (ndp_ctx->active_ndp_peers > 0) {
+		hdd_err(FL("NDP peers active: %d, cannot delete NDI"),
+			ndp_ctx->active_ndp_peers);
 		return -EINVAL;
 	}
 
@@ -676,9 +678,51 @@ static int hdd_ndp_responder_req_handler(hdd_context_t *hdd_ctx,
  *
  * Return: 0 on success or error code on failure
  */
-static int hdd_ndp_end_req_handler(hdd_context_t *hdd_ctx,
-						struct nlattr **tb)
+static int hdd_ndp_end_req_handler(hdd_context_t *hdd_ctx, struct nlattr **tb)
 {
+	struct ndp_end_req req = {0};
+	QDF_STATUS status;
+	tHalHandle hal = hdd_ctx->hHal;
+
+	ENTER();
+
+	/* NAN data path coexists only with STA interface */
+	if (!hdd_is_ndp_allowed(hdd_ctx)) {
+		hddLog(LOGE, FL("Unsupported concurrency for NAN datapath"));
+		return -EINVAL;
+	}
+
+	if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]) {
+		hddLog(LOGE, FL("Transaction ID is unavailable"));
+		return -EINVAL;
+	}
+	req.transaction_id =
+		nla_get_u16(tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]);
+
+	if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY]) {
+		hddLog(LOGE, FL("NDP instance ID array is unavailable"));
+		return -EINVAL;
+	}
+
+	req.num_ndp_instances =
+		nla_len(tb[QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY]) /
+			sizeof(uint32_t);
+	if (0 >= req.num_ndp_instances) {
+		hddLog(LOGE, FL("Num NDP instances is 0"));
+		return -EINVAL;
+	}
+	req.ndp_ids = nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID_ARRAY]);
+
+	hddLog(LOG1, FL("sending ndp_end_req to SME, transaction_id: %d"),
+		req.transaction_id);
+
+	status = sme_ndp_end_req_handler(hal, &req);
+	if (status != QDF_STATUS_SUCCESS) {
+		hddLog(LOGE, FL("sme_ndp_end_req_handler failed, status: %d"),
+		       status);
+		return -ECOMM;
+	}
+	EXIT();
 	return 0;
 }
 
@@ -1072,6 +1116,9 @@ static void hdd_ndp_new_peer_ind_handler(hdd_adapter_t *adapter,
 		hdd_err(FL("Invalid new NDP peer params"));
 		return;
 	}
+	hddLog(LOG1, FL("session_id: %d, peer_mac: %pM, sta_id: %d"),
+		new_peer_ind->session_id, new_peer_ind->peer_mac_addr.bytes,
+		new_peer_ind->sta_id);
 
 	/* save peer in ndp ctx */
 	if (false == hdd_save_peer(sta_ctx, new_peer_ind->sta_id,
@@ -1082,6 +1129,8 @@ static void hdd_ndp_new_peer_ind_handler(hdd_adapter_t *adapter,
 
 	/* this function is called for each new peer */
 	ndp_ctx->active_ndp_peers++;
+	hddLog(LOG1, FL("vdev_id: %d, num_peers: %d"),
+		adapter->sessionId,  ndp_ctx->active_ndp_peers);
 
 	hdd_roam_register_sta(adapter, &roam_info, new_peer_ind->sta_id,
 			    &new_peer_ind->peer_mac_addr, &tmp_bss_descp);
@@ -1089,7 +1138,7 @@ static void hdd_ndp_new_peer_ind_handler(hdd_adapter_t *adapter,
 	/* perform following steps for first new peer ind */
 	if (ndp_ctx->active_ndp_peers == 1) {
 		hdd_ctx->sta_to_adapter[NDP_BROADCAST_STAID] = adapter;
-		hdd_save_peer(sta_ctx, new_peer_ind->sta_id, &bc_mac_addr);
+		hdd_save_peer(sta_ctx, NDP_BROADCAST_STAID, &bc_mac_addr);
 		hdd_roam_register_sta(adapter, &roam_info, NDP_BROADCAST_STAID,
 				    &bc_mac_addr, &tmp_bss_descp);
 		hddLog(LOG1, FL("Set ctx connection state to connected"));
@@ -1134,11 +1183,13 @@ static void hdd_ndp_peer_departed_ind_handler(
 static void hdd_ndp_confirm_ind_handler(hdd_adapter_t *adapter,
 						void *ind_params)
 {
+	int idx;
 	uint32_t ndp_qos_config = 0;
 	struct ndp_confirm_event *ndp_confirm = ind_params;
 	struct sk_buff *vendor_event;
 	hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
 	struct nan_datapath_ctx *ndp_ctx = WLAN_HDD_GET_NDP_CTX_PTR(adapter);
+	hdd_station_ctx_t *sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
 	uint32_t data_len;
 
 	ENTER();
@@ -1150,8 +1201,14 @@ static void hdd_ndp_confirm_ind_handler(hdd_adapter_t *adapter,
 	if (0 != wlan_hdd_validate_context(hdd_ctx))
 		return;
 
-	/* ndp_confirm is called each time user generated npd req succeeds */
-	ndp_ctx->active_ndp_sessions++;
+	/* ndp_confirm is called each time user generated ndp req succeeds */
+	idx = hdd_get_peer_idx(sta_ctx, &ndp_confirm->peer_ndi_mac_addr);
+	if (idx == INVALID_PEER_IDX)
+		hddLog(LOGE,
+			FL("can't find addr: %pM in vdev_id: %d, peer table."),
+			&ndp_confirm->peer_ndi_mac_addr, adapter->sessionId);
+	else
+		ndp_ctx->active_ndp_sessions[idx]++;
 
 	data_len = (4 * sizeof(uint32_t)) + QDF_MAC_ADDR_SIZE + IFNAMSIZ +
 			sizeof(uint16_t) + NLMSG_HDRLEN + (8 * NLA_HDRLEN) +
@@ -1439,12 +1496,98 @@ ndp_responder_rsp_nla_failed:
  * @adapter: pointer to adapter context
  * @rsp_params: response parameters
  *
+ * Following vendor event is sent to cfg80211:
+ * QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD =
+ *         QCA_WLAN_VENDOR_ATTR_NDP_END_RESPONSE (4 bytes)
+ * QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_TYPE (4 bytes)
+ * QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE (4 bytes)
+ * QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID (2 bytes)
+ *
  * Return: none
  */
-static void hdd_ndp_end_rsp_handler(hdd_adapter_t *adapter,
-						void *rsp_params)
+static void hdd_ndp_end_rsp_handler(hdd_adapter_t *adapter, void *rsp_params)
 {
+	struct sk_buff *vendor_event;
+	hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
+	struct ndp_end_rsp_event *rsp = rsp_params;
+	struct nan_datapath_ctx *ndp_ctx;
+	uint32_t data_len, i;
+	int idx;
+	hdd_station_ctx_t *sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
+
+	ENTER();
+
+	if (!rsp) {
+		hddLog(LOGE, FL("Invalid ndp end response"));
+		return;
+	}
+
+	if (0 != wlan_hdd_validate_context(hdd_ctx))
+		return;
+
+	/* adjust active ndp instances per peer */
+	if (rsp->status == NDP_CMD_RSP_STATUS_SUCCESS) {
+		for (i = 0; i < rsp->num_peers; i++) {
+			adapter = hdd_get_adapter_by_vdev(hdd_ctx,
+						rsp->ndp_map[i].vdev_id);
+			if (NULL == adapter) {
+				hddLog(LOGE,
+					FL("adapter for vdev_id: %d not found"),
+					rsp->ndp_map[i].vdev_id);
+				continue;
+			}
+			sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
+			ndp_ctx = WLAN_HDD_GET_NDP_CTX_PTR(adapter);
+			idx = hdd_get_peer_idx(sta_ctx,
+					&rsp->ndp_map[i].peer_ndi_mac_addr);
+			if (idx == INVALID_PEER_IDX)
+				hddLog(LOGE,
+					FL("can't find addr: %pM in vdev_id: %d, peer table."),
+					&rsp->ndp_map[i].peer_ndi_mac_addr,
+					rsp->ndp_map[i].vdev_id);
+			else
+				ndp_ctx->active_ndp_sessions[idx] =
+					rsp->ndp_map[i].num_active_ndp_sessions;
+		}
+	}
+
+	data_len = NLMSG_HDRLEN + (4 * NLA_HDRLEN) + (3 * sizeof(uint32_t)) +
+		   sizeof(uint16_t);
+
+	vendor_event = cfg80211_vendor_event_alloc(hdd_ctx->wiphy, NULL,
+				data_len, QCA_NL80211_VENDOR_SUBCMD_NDP_INDEX,
+				GFP_KERNEL);
+	if (!vendor_event) {
+		hddLog(LOGE, FL("cfg80211_vendor_event_alloc failed"));
+		return;
+	}
+
+	if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_SUBCMD,
+			QCA_WLAN_VENDOR_ATTR_NDP_END_RESPONSE))
+		goto ndp_end_rsp_nla_failed;
+
+	if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_TYPE,
+			rsp->status))
+		goto ndp_end_rsp_nla_failed;
+
+	if (nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_DRV_RETURN_VALUE,
+			rsp->reason))
+		goto ndp_end_rsp_nla_failed;
+
+	if (nla_put_u16(vendor_event, QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID,
+			rsp->transaction_id))
+		goto ndp_end_rsp_nla_failed;
+
+	hddLog(LOG1, FL("NDP End rsp sent, transaction id: %d, status: %d, reason: %d"),
+	       rsp->transaction_id, rsp->status, rsp->reason);
+	cfg80211_vendor_event(vendor_event, GFP_KERNEL);
+	EXIT();
 	return;
+
+ndp_end_rsp_nla_failed:
+	hddLog(LOGE, FL("nla_put api failed"));
+	kfree_skb(vendor_event);
+	EXIT();
 }
 
 /**
@@ -1523,7 +1666,7 @@ void hdd_ndp_event_handler(hdd_adapter_t *adapter,
 			break;
 		case eCSR_ROAM_RESULT_NDP_END_RSP:
 			hdd_ndp_end_rsp_handler(adapter,
-				&roam_info->ndp.ndp_end_rsp_params);
+				roam_info->ndp.ndp_end_rsp_params);
 			break;
 		case eCSR_ROAM_RESULT_NDP_PEER_DEPARTED_IND:
 			hdd_ndp_peer_departed_ind_handler(adapter,
@@ -1597,14 +1740,14 @@ static int __wlan_hdd_cfg80211_process_ndp_cmd(struct wiphy *wiphy,
 	transaction_id = nla_get_u16(
 			tb[QCA_WLAN_VENDOR_ATTR_NDP_TRANSACTION_ID]);
 
-	if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]) {
-		hdd_err(FL("Interface name string is unavailable"));
-		return -EINVAL;
+	if (tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]) {
+		iface_name = nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]);
+		hdd_err(FL("Transaction Id: %d NDP Cmd: %d iface_name: %s"),
+			transaction_id, ndp_cmd_type, iface_name);
+	} else {
+		hdd_err(FL("Transaction Id: %d NDP Cmd: %d iface_name: unspecified"),
+			transaction_id, ndp_cmd_type);
 	}
-	iface_name = nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]);
-
-	hddLog(LOG2, FL("Transaction Id: %d NDP Cmd: %d iface_name: %s"),
-		transaction_id, ndp_cmd_type, iface_name);
 
 	switch (ndp_cmd_type) {
 	case QCA_WLAN_VENDOR_ATTR_NDP_INTERFACE_CREATE:

+ 3 - 1
core/hdd/src/wlan_hdd_nan_datapath.h

@@ -39,6 +39,7 @@ struct wireless_dev;
 
 #define NDP_APP_INFO_LEN 255
 #define NDP_QOS_INFO_LEN 255
+#define NDP_NUM_INSTANCE_ID 255
 
 #define HDD_MAX_NUM_NDP_STA           (MAX_IBSS_PEERS)
 #define NDP_BROADCAST_STAID           (0)
@@ -192,7 +193,8 @@ enum nan_datapath_state {
  */
 struct nan_datapath_ctx {
 	enum nan_datapath_state state;
-	uint32_t active_ndp_sessions;
+	/* idx in following array should follow conn_info.peerMacAddress */
+	uint32_t active_ndp_sessions[HDD_MAX_NUM_NDP_STA];
 	uint32_t active_ndp_peers;
 	uint16_t ndp_create_transaction_id;
 	uint16_t ndp_delete_transaction_id;

+ 23 - 0
core/hdd/src/wlan_hdd_tx_rx.c

@@ -778,6 +778,29 @@ static QDF_STATUS hdd_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
 	return QDF_STATUS_SUCCESS;
 }
 
+/**
+ * hdd_get_peer_idx() - Get the idx for given address in peer table
+ * @sta_ctx: pointer to HDD Station Context
+ * @addr: pointer to Peer Mac address
+ *
+ * Return: index on success, INVALID_PEER_IDX otherwise
+ */
+int hdd_get_peer_idx(hdd_station_ctx_t *sta_ctx, struct qdf_mac_addr *addr)
+{
+	uint8_t idx;
+
+	for (idx = 0; idx < MAX_IBSS_PEERS; idx++) {
+		if (sta_ctx->conn_info.staId[idx] == 0)
+			continue;
+		if (qdf_mem_cmp(&sta_ctx->conn_info.peerMacAddress[idx],
+				addr, sizeof(struct qdf_mac_addr)))
+			continue;
+		return idx;
+	}
+
+	return INVALID_PEER_IDX;
+}
+
 /**
  * hdd_rx_packet_cbk() - Receive packet handler
  * @context: pointer to HDD context

+ 10 - 6
core/mac/inc/sir_api.h

@@ -6127,16 +6127,14 @@ struct ndp_confirm_event {
 /**
  * struct ndp_end_req - ndp end request
  * @transaction_id: unique transaction identifier
- * @vdev_id: session id of the interface over which ndp is being created
  * @num_ndp_instances: number of ndp instances to be terminated
- * @ndp_instances: list of ndp instances to be terminated
+ * @ndp_ids: pointer to array of ndp_instance_id to be terminated
  *
  */
 struct ndp_end_req {
 	uint32_t transaction_id;
-	uint32_t vdev_id;
 	uint32_t num_ndp_instances;
-	uint32_t ndp_instances[];
+	uint32_t *ndp_ids;
 };
 
 /**
@@ -6155,13 +6153,19 @@ struct peer_ndp_map {
 /**
  * struct ndp_end_rsp_event  - firmware response to ndp end request
  * @transaction_id: unique identifier for the request
- * @vdev_id: session id of the interface over which ndp is being created
+ * @status: status of operation
+ * @reason: reason (opaque to host driver)
+ * @num_ndp_terminated: if successful, number of ndp instances terminated
+ * @num_peers: number of peers in ndp_map
  * @ndp_map: mapping of NDP instances to peer to VDEV
  *
  */
 struct ndp_end_rsp_event {
 	uint32_t transaction_id;
-	uint32_t vdev_id;
+	uint32_t status;
+	uint32_t reason;
+	uint32_t num_ndp_terminated;
+	uint32_t num_peers;
 	struct peer_ndp_map ndp_map[];
 };
 

+ 1 - 1
core/mac/inc/wni_api.h

@@ -259,10 +259,10 @@ enum eWniMsgTypes {
 	eWNI_SME_NDP_INDICATION,
 	eWNI_SME_NDP_RESPONDER_REQ,
 	eWNI_SME_NDP_RESPONDER_RSP,
+	eWNI_SME_NDP_END_REQ,
 	eWNI_SME_NDP_END_RSP,
 	eWNI_SME_NDP_PEER_DEPARTED_IND,
 	eWNI_SME_NDP_END_IND,
-
 	eWNI_SME_MSG_TYPES_END
 };
 

+ 0 - 2
core/mac/src/include/sir_params.h

@@ -748,8 +748,6 @@ typedef struct sSirMbMsgP2p {
 
 #define SIR_TST_XMIT_MSG_QS_EMPTY_EVT     0x00000080
 
-/* added for OBSS */
-
 /* Param Change Bitmap sent to HAL */
 #define PARAM_BCN_INTERVAL_CHANGED                      (1 << 0)
 #define PARAM_SHORT_PREAMBLE_CHANGED                 (1 << 1)

+ 1 - 0
core/mac/src/pe/lim/lim_process_message_queue.c

@@ -1454,6 +1454,7 @@ void lim_process_messages(tpAniSirGlobal mac_ctx, tpSirMsgQ msg)
 	case eWNI_SME_EXT_CHANGE_CHANNEL:
 	case eWNI_SME_NDP_INITIATOR_REQ:
 	case eWNI_SME_NDP_RESPONDER_REQ:
+	case eWNI_SME_NDP_END_REQ:
 		/* These messages are from HDD.No need to respond to HDD */
 		lim_process_normal_hdd_msg(mac_ctx, msg, false);
 		break;

+ 1 - 0
core/mac/src/pe/lim/lim_process_sme_req_messages.c

@@ -5170,6 +5170,7 @@ bool lim_process_sme_req_messages(tpAniSirGlobal pMac, tpSirMsgQ pMsg)
 		break;
 	case eWNI_SME_PDEV_SET_HT_VHT_IE:
 		lim_process_set_pdev_IEs(pMac, pMsgBuf);
+	case eWNI_SME_NDP_END_REQ:
 	case eWNI_SME_NDP_INITIATOR_REQ:
 	case eWNI_SME_NDP_RESPONDER_REQ:
 		lim_handle_ndp_request_message(pMac, pMsg);

+ 81 - 1
core/mac/src/pe/nan/nan_datapath.c

@@ -216,6 +216,21 @@ responder_rsp:
 	return ret_val;
 }
 
+/**
+ * lim_ndp_delete_peers() - Delete peers if needed
+ * @mac_ctx: handle to mac context
+ * @ndp_map: peer map of returning ndp end rsp
+ * @num_peers: number of entries in ndp_map
+ * This function deletes a peer if there are no active NDPs left with that peer
+ *
+ * Return: None
+ */
+static void lim_ndp_delete_peers(tpAniSirGlobal mac_ctx,
+				 struct peer_ndp_map *ndp_map,
+				 uint8_t num_peers)
+{
+}
+
 /**
  * lim_handle_ndp_event_message() - Handler for NDP events/RSP from WMA
  * @mac_ctx: handle to mac structure
@@ -249,6 +264,20 @@ QDF_STATUS lim_handle_ndp_event_message(tpAniSirGlobal mac_ctx, cds_msg_t *msg)
 		status = lim_ndp_responder_rsp_handler(mac_ctx, msg->bodyptr,
 					msg->bodyval);
 		break;
+	case SIR_HAL_NDP_END_RSP: {
+		struct ndp_end_rsp_event *ndp_end_rsp = msg->bodyptr;
+		uint32_t rsp_len = sizeof(*ndp_end_rsp);
+
+		if (ndp_end_rsp && ndp_end_rsp->ndp_map) {
+			lim_ndp_delete_peers(mac_ctx, ndp_end_rsp->ndp_map,
+					     ndp_end_rsp->num_peers);
+			rsp_len += (ndp_end_rsp->num_peers *
+					sizeof(struct peer_ndp_map));
+		}
+		lim_send_ndp_event_to_sme(mac_ctx, eWNI_SME_NDP_END_RSP,
+				msg->bodyptr, rsp_len, msg->bodyval);
+		break;
+	}
 	default:
 		lim_log(mac_ctx, LOGE,
 			FL("Unhandled NDP event: %d"), msg->type);
@@ -361,6 +390,54 @@ send_failure_rsp:
 	return status;
 }
 
+/**
+ * lim_process_sme_ndp_data_end_req() - Handler for eWNI_SME_NDP_END_REQ
+ * from SME.
+ * @mac_ctx: handle to mac context
+ * @sme_msg: ndp data end request msg
+ *
+ * Return: Status of operation
+ */
+QDF_STATUS lim_process_sme_ndp_data_end_req(tpAniSirGlobal mac_ctx,
+					    struct sir_sme_ndp_end_req *sme_msg)
+{
+	tSirMsgQ msg;
+	uint32_t len;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+
+	if (NULL == sme_msg) {
+		lim_log(mac_ctx, LOGE, FL("invalid ndp_req"));
+		/* msg to unblock SME, but not send rsp to HDD */
+		lim_send_ndp_event_to_sme(mac_ctx, eWNI_SME_NDP_END_RSP, NULL,
+					  0, true);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	msg.type = SIR_HAL_NDP_END_REQ;
+	msg.reserved = 0;
+	len = sizeof(*sme_msg->req) + (sme_msg->req->num_ndp_instances *
+						 sizeof(uint32_t));
+	msg.bodyptr = qdf_mem_malloc(len);
+	if (NULL == msg.bodyptr) {
+		/* msg to unblock SME, but not send rsp to HDD */
+		lim_send_ndp_event_to_sme(mac_ctx, eWNI_SME_NDP_END_RSP, NULL,
+					  0, true);
+		return QDF_STATUS_E_NOMEM;
+	}
+	qdf_mem_copy(msg.bodyptr, sme_msg->req, len);
+	msg.bodyval = 0;
+
+	lim_log(mac_ctx, LOG1, FL("sending SIR_HAL_NDP_END_REQ to WMA"));
+	MTRACE(mac_trace_msg_tx(mac_ctx, NO_SESSION, msg.type));
+
+	if (eSIR_SUCCESS != wma_post_ctrl_msg(mac_ctx, &msg)) {
+		lim_log(mac_ctx, LOGP, FL("wma_post_ctrl_msg failed"));
+		status = QDF_STATUS_E_FAILURE;
+	}
+
+	return status;
+}
+
 /**
  * lim_handle_ndp_request_message() - Handler for NDP req from SME
  * @mac_ctx: handle to mac structure
@@ -374,6 +451,10 @@ QDF_STATUS lim_handle_ndp_request_message(tpAniSirGlobal mac_ctx,
 	QDF_STATUS status;
 
 	switch (msg->type) {
+	case eWNI_SME_NDP_END_REQ:
+		status = lim_process_sme_ndp_data_end_req(mac_ctx,
+							  msg->bodyptr);
+		break;
 	case eWNI_SME_NDP_INITIATOR_REQ:
 		status = lim_process_sme_ndp_initiator_req(mac_ctx,
 							   msg->bodyptr);
@@ -498,7 +579,6 @@ end:
 	}
 }
 
-
 /**
  * lim_send_sme_ndp_add_sta_rsp() - prepares and send new peer ind to SME
  * @mac_ctx: handle to mac structure

+ 1 - 1
core/sme/inc/csr_api.h

@@ -1384,7 +1384,7 @@ typedef struct tagCsrRoamInfo {
 		struct sme_ndp_peer_ind ndp_peer_ind_params;
 		struct ndp_schedule_update_rsp ndp_sched_upd_rsp_params;
 		struct ndp_end_indication_event ndp_end_ind_params;
-		struct ndp_end_rsp_event ndp_end_rsp_params;
+		struct ndp_end_rsp_event *ndp_end_rsp_params;
 		struct ndp_confirm_event ndp_confirm_params;
 		struct ndp_responder_rsp_event ndp_responder_rsp_params;
 		struct ndp_indication_event ndp_indication_params;

+ 1 - 0
core/sme/inc/sme_inside.h

@@ -195,6 +195,7 @@ typedef struct tagSmeCmd {
 #ifdef WLAN_FEATURE_NAN_DATAPATH
 		struct ndp_initiator_req initiator_req;
 		struct ndp_responder_req responder_req;
+		struct ndp_end_req *data_end_req;
 #endif
 	} u;
 } tSmeCmd;

+ 1 - 0
core/sme/inc/sme_internal.h

@@ -95,6 +95,7 @@ typedef enum eSmeCommandType {
 	e_sme_command_set_antenna_mode,
 	eSmeCommandNdpInitiatorRequest,
 	eSmeCommandNdpResponderRequest,
+	eSmeCommandNdpDataEndInitiatorRequest,
 } eSmeCommandType;
 
 typedef enum eSmeState {

+ 23 - 2
core/sme/inc/sme_nan_datapath.h

@@ -62,6 +62,19 @@ struct sir_sme_ndp_responder_req {
 	struct ndp_responder_req req;
 };
 
+/*
+ * struct sir_sme_ndp_end_req - sme request struct for ndp end req
+ * @msg_type: SME msg type (eWNI_SME_NDP_END_REQ)
+ * @msg_len: length of message
+ * @req: actual ndp end request
+ *
+ */
+struct sir_sme_ndp_end_req {
+	uint16_t msg_type;
+	uint16_t msg_len;
+	struct ndp_end_req *req;
+};
+
 /* NaN initiator request handler */
 QDF_STATUS sme_ndp_initiator_req_handler(tHalHandle hal,
 					struct ndp_initiator_req *req_params);
@@ -71,8 +84,7 @@ QDF_STATUS sme_ndp_responder_req_handler(tHalHandle hal,
 					struct ndp_responder_req *req_params);
 
 /* NAN indication response handler */
-QDF_STATUS sme_ndp_end_req_handler(uint32_t session_id,
-					struct ndp_end_req *req_params);
+QDF_STATUS sme_ndp_end_req_handler(tHalHandle hal, struct ndp_end_req *req);
 
 /* NAN schedule update request handler */
 QDF_STATUS sme_ndp_sched_req_handler(uint32_t session_id,
@@ -105,6 +117,8 @@ void csr_roam_update_ndp_return_params(tpAniSirGlobal mac_ctx,
 					struct tagCsrRoamInfo *roam_info);
 QDF_STATUS csr_process_ndp_initiator_request(tpAniSirGlobal mac_ctx,
 					     tSmeCmd *cmd);
+QDF_STATUS csr_process_ndp_data_end_request(tpAniSirGlobal mac_ctx,
+					    tSmeCmd *cmd);
 
 void sme_ndp_msg_processor(tpAniSirGlobal mac_ctx, cds_msg_t *msg);
 
@@ -168,5 +182,12 @@ static inline QDF_STATUS csr_process_ndp_responder_request(
 {
 	return QDF_STATUS_SUCCESS;
 }
+
+static inline QDF_STATUS csr_process_ndp_data_end_request(
+				tpAniSirGlobal mac_ctx, tSmeCmd *cmd)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
 #endif /* WLAN_FEATURE_NAN_DATAPATH */
 #endif /* __SME_NAN_DATAPATH_H */

+ 11 - 1
core/sme/src/common/sme_api.c

@@ -971,7 +971,16 @@ sme_process_cmd:
 		status = csr_process_ndp_responder_request(pMac, pCommand);
 		if (status != QDF_STATUS_SUCCESS) {
 			if (csr_ll_remove_entry(&pMac->sme.smeCmdActiveList,
-					     &pCommand->Link, LL_ACCESS_LOCK))
+					&pCommand->Link, LL_ACCESS_LOCK))
+				csr_release_command(pMac, pCommand);
+		}
+		break;
+	case eSmeCommandNdpDataEndInitiatorRequest:
+		csr_ll_unlock(&pMac->sme.smeCmdActiveList);
+		status = csr_process_ndp_data_end_request(pMac, pCommand);
+		if (status != QDF_STATUS_SUCCESS) {
+			if (csr_ll_remove_entry(&pMac->sme.smeCmdActiveList,
+					&pCommand->Link, LL_ACCESS_LOCK))
 				csr_release_command(pMac, pCommand);
 		}
 		break;
@@ -3009,6 +3018,7 @@ QDF_STATUS sme_process_msg(tHalHandle hHal, cds_msg_t *pMsg)
 	case eWNI_SME_NDP_INITIATOR_RSP:
 	case eWNI_SME_NDP_INDICATION:
 	case eWNI_SME_NDP_RESPONDER_RSP:
+	case eWNI_SME_NDP_END_RSP:
 		sme_ndp_msg_processor(pMac, pMsg);
 		break;
 	default:

+ 139 - 44
core/sme/src/nan/nan_datapath_api.c

@@ -84,6 +84,7 @@ QDF_STATUS sme_ndp_initiator_req_handler(tHalHandle hal,
 		cmd->u.initiator_req.ndp_config.ndp_cfg =
 			qdf_mem_malloc(req_params->ndp_config.ndp_cfg_len);
 		if (NULL == cmd->u.initiator_req.ndp_config.ndp_cfg) {
+			csr_release_command_roam(mac_ctx, cmd);
 			sme_release_global_lock(&mac_ctx->sme);
 			qdf_mem_free(
 				cmd->u.initiator_req.ndp_info.ndp_app_info);
@@ -103,6 +104,7 @@ QDF_STATUS sme_ndp_initiator_req_handler(tHalHandle hal,
 		qdf_mem_free(cmd->u.initiator_req.ndp_config.ndp_cfg);
 		cmd->u.initiator_req.ndp_info.ndp_app_info_len = 0;
 		cmd->u.initiator_req.ndp_config.ndp_cfg_len = 0;
+		csr_release_command_roam(mac_ctx, cmd);
 	}
 
 	sme_release_global_lock(&mac_ctx->sme);
@@ -193,17 +195,63 @@ QDF_STATUS sme_ndp_responder_req_handler(tHalHandle hal,
 
 /**
  * sme_ndp_end_req_handler() - ndp end request handler
- * @session_id: session id over which the ndp is being created
- * @req_params: request parameters
+ * @hal: hal handle
+ * @req: ndp end request parameters
  *
  * Return: QDF_STATUS_SUCCESS on success; error number otherwise
  */
-QDF_STATUS sme_ndp_end_req_handler(uint32_t session_id,
-	struct ndp_end_req *req_params)
+QDF_STATUS sme_ndp_end_req_handler(tHalHandle hal, struct ndp_end_req *req)
 {
-	return QDF_STATUS_SUCCESS;
-}
+	tSmeCmd *cmd;
+	QDF_STATUS ret = QDF_STATUS_SUCCESS;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	tpAniSirGlobal mac_ctx = PMAC_STRUCT(hal);
 
+	if (NULL == req) {
+		sms_log(mac_ctx, LOGE, FL("Invalid ndp end req"));
+		return QDF_STATUS_E_INVAL;
+	}
+
+	status = sme_acquire_global_lock(&mac_ctx->sme);
+	if (QDF_STATUS_SUCCESS != status) {
+		sms_log(mac_ctx, LOGE,
+		       FL("SME lock failed, status:%d"), status);
+		return QDF_STATUS_E_RESOURCES;
+	}
+	cmd = csr_get_command_buffer(mac_ctx);
+	if (NULL == cmd) {
+		sme_release_global_lock(&mac_ctx->sme);
+		return QDF_STATUS_E_RESOURCES;
+	}
+
+	cmd->command = eSmeCommandNdpDataEndInitiatorRequest;
+	cmd->u.data_end_req = qdf_mem_malloc(sizeof(*req) +
+				(req->num_ndp_instances * sizeof(uint32_t)));
+	if (NULL == cmd->u.data_end_req) {
+			csr_release_command_roam(mac_ctx, cmd);
+			sme_release_global_lock(&mac_ctx->sme);
+			return QDF_STATUS_E_NOMEM;
+	}
+
+	qdf_mem_copy(cmd->u.data_end_req, req, sizeof(*req));
+	cmd->u.data_end_req->ndp_ids =
+		(uint32_t *)((uint8_t *)&cmd->u.data_end_req[1]);
+	qdf_mem_copy(cmd->u.data_end_req->ndp_ids, req->ndp_ids,
+		 sizeof(uint32_t) * req->num_ndp_instances);
+
+	status = csr_queue_sme_command(mac_ctx, cmd, true);
+	if (QDF_STATUS_SUCCESS != status) {
+		sms_log(mac_ctx, LOGE, FL("SME enqueue failed, status:%d"),
+			status);
+		qdf_mem_free(cmd->u.data_end_req);
+		cmd->u.data_end_req = NULL;
+		ret = QDF_STATUS_E_FAILURE;
+		csr_release_command_roam(mac_ctx, cmd);
+	}
+
+	sme_release_global_lock(&mac_ctx->sme);
+	return ret;
+}
 
 /**
  * sme_ndp_sched_req_handler() - ndp schedule request handler
@@ -381,7 +429,6 @@ void csr_roam_update_ndp_return_params(tpAniSirGlobal mac_ctx,
 	}
 }
 
-
 /**
  * csr_process_ndp_initiator_request() - process ndp initiator request
  * @mac_ctx: Global MAC context
@@ -447,6 +494,7 @@ QDF_STATUS csr_process_ndp_responder_request(tpAniSirGlobal mac_ctx,
 
 	msg_len  = sizeof(*lim_msg);
 	lim_msg = qdf_mem_malloc(msg_len);
+
 	if (!lim_msg) {
 		sms_log(mac_ctx, LOGE, FL("Mem alloc fail"));
 		status = QDF_STATUS_E_NOMEM;
@@ -489,6 +537,46 @@ free_config:
 	return status;
 }
 
+/*
+ * csr_process_ndp_data_end_request() - process ndp data end request
+ * @mac_ctx: Global MAC context
+ * @cmd: sme command containing ndp initiator request
+ *
+ * Return: status of operation
+ */
+QDF_STATUS csr_process_ndp_data_end_request(tpAniSirGlobal mac_ctx,
+					    tSmeCmd *cmd)
+{
+	QDF_STATUS status;
+	struct sir_sme_ndp_end_req *lim_msg;
+	uint16_t msg_len;
+
+	if (NULL == cmd) {
+		sms_log(mac_ctx, LOGE, FL("NULL sme cmd"));
+		return QDF_STATUS_E_INVAL;
+	}
+
+	msg_len  = sizeof(*lim_msg);
+	lim_msg = qdf_mem_malloc(msg_len);
+	if (NULL == lim_msg) {
+		sms_log(mac_ctx, LOGE, FL("Malloc failed"));
+		qdf_mem_free(cmd->u.data_end_req);
+		cmd->u.data_end_req = NULL;
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	lim_msg->msg_type = (uint16_t)eWNI_SME_NDP_END_REQ;
+	lim_msg->msg_len = msg_len;
+	lim_msg->req = cmd->u.data_end_req;
+
+	status = cds_send_mb_message_to_mac(lim_msg);
+	if (status != QDF_STATUS_SUCCESS) {
+		qdf_mem_free(cmd->u.data_end_req);
+		cmd->u.data_end_req = NULL;
+	}
+	return status;
+}
+
 /**
  * sme_ndp_msg_processor() - message processor for ndp/ndi north-bound SME msg.
  * @mac_ctx: Global MAC context
@@ -511,6 +599,11 @@ void sme_ndp_msg_processor(tpAniSirGlobal mac_ctx, cds_msg_t *msg)
 	eSmeCommandType cmd_to_rel = eSmeNoCommand;
 	bool send_to_user = true;
 
+	entry = csr_ll_peek_head(&mac_ctx->sme.smeCmdActiveList,
+				 LL_ACCESS_LOCK);
+	if (entry != NULL)
+		cmd = GET_BASE_ADDR(entry, tSmeCmd, Link);
+
 	switch (msg->type) {
 	case eWNI_SME_NDP_CONFIRM_IND: {
 		result = eCSR_ROAM_RESULT_NDP_CONFIRM_IND;
@@ -570,6 +663,31 @@ void sme_ndp_msg_processor(tpAniSirGlobal mac_ctx, cds_msg_t *msg)
 		release_active_cmd = true;
 		cmd_to_rel = eSmeCommandNdpResponderRequest;
 		break;
+	case eWNI_SME_NDP_END_RSP: {
+		if (true == msg->bodyval) {
+			/* rsp was locally generated, do not send to HDD */
+			send_to_user = false;
+		} else {
+			result = eCSR_ROAM_RESULT_NDP_END_RSP;
+			roam_info.ndp.ndp_end_rsp_params = msg->bodyptr;
+			/*
+			 * NDP_END_RSP is independent of session, but session_id
+			 * is needed for csrRoamCallCallback(). Set it to 0
+			 * which is a valid session.
+			 */
+			session_id = 0;
+		}
+		release_active_cmd = true;
+		cmd_to_rel = eSmeCommandNdpDataEndInitiatorRequest;
+		/*
+		 * get the number of NDPs requested to be terminated from
+		 * the sme command being released
+		 */
+		if (cmd != NULL && cmd_to_rel == cmd->command)
+			roam_info.ndp.ndp_end_rsp_params->num_ndp_terminated =
+				cmd->u.data_end_req->num_ndp_instances;
+		break;
+	}
 	default:
 		sms_log(mac_ctx, LOGE, FL("Unhandled NDP rsp"));
 		qdf_mem_free(msg->bodyptr);
@@ -587,33 +705,10 @@ void sme_ndp_msg_processor(tpAniSirGlobal mac_ctx, cds_msg_t *msg)
 	 */
 	switch (msg->type) {
 	case eWNI_SME_NDP_INITIATOR_RSP:
-		entry = csr_ll_peek_head(&mac_ctx->sme.smeCmdActiveList,
-				LL_ACCESS_LOCK);
-		if (entry != NULL) {
-			cmd = GET_BASE_ADDR(entry, tSmeCmd, Link);
-			if (eSmeCommandNdpInitiatorRequest == cmd->command) {
-				qdf_mem_free(
-					cmd->u.initiator_req.
-					ndp_config.ndp_cfg);
-				qdf_mem_free(
-					cmd->u.initiator_req.
-					ndp_info.ndp_app_info);
-			}
-		}
-		break;
-	case eWNI_SME_NDP_RESPONDER_RSP:
-		entry = csr_ll_peek_head(&mac_ctx->sme.smeCmdActiveList,
-					LL_ACCESS_LOCK);
-		if (entry != NULL) {
-			cmd = GET_BASE_ADDR(entry, tSmeCmd, Link);
-			if (eSmeCommandNdpResponderRequest == cmd->command) {
-				qdf_mem_free(
-					cmd->u.responder_req.
-					ndp_config.ndp_cfg);
-				qdf_mem_free(
-					cmd->u.responder_req.
-					ndp_info.ndp_app_info);
-			}
+		if (cmd && eSmeCommandNdpInitiatorRequest == cmd->command) {
+			qdf_mem_free(cmd->u.initiator_req.ndp_config.ndp_cfg);
+			qdf_mem_free(
+				cmd->u.initiator_req.ndp_info.ndp_app_info);
 		}
 		break;
 	case eWNI_SME_NDP_INDICATION:
@@ -621,22 +716,22 @@ void sme_ndp_msg_processor(tpAniSirGlobal mac_ctx, cds_msg_t *msg)
 			roam_info.ndp.ndp_indication_params.ndp_config.ndp_cfg);
 		qdf_mem_free(
 			roam_info.ndp.ndp_indication_params.
-			ndp_info.ndp_app_info);
+				ndp_info.ndp_app_info);
+		break;
+	case eWNI_SME_NDP_END_RSP:
+		if (cmd &&
+			eSmeCommandNdpDataEndInitiatorRequest == cmd->command) {
+			qdf_mem_free(cmd->u.data_end_req);
+			cmd->u.data_end_req = NULL;
+		}
 		break;
 	default:
 		break;
 	}
 	qdf_mem_free(msg->bodyptr);
-	if (release_active_cmd == false)
-		return;
-
-	entry = csr_ll_peek_head(&mac_ctx->sme.smeCmdActiveList,
-			LL_ACCESS_LOCK);
-	if (entry == NULL)
-		return;
+	msg->bodyptr = NULL;
 
-	cmd = GET_BASE_ADDR(entry, tSmeCmd, Link);
-	if (cmd_to_rel == cmd->command) {
+	if (release_active_cmd && cmd && cmd_to_rel == cmd->command) {
 		/* Now put this cmd back on the avilable command list */
 		if (csr_ll_remove_entry(&mac_ctx->sme.smeCmdActiveList,
 				     entry, LL_ACCESS_LOCK))

+ 5 - 0
core/wma/src/wma_main.c

@@ -5950,8 +5950,13 @@ QDF_STATUS wma_mc_process_msg(void *cds_context, cds_msg_t *msg)
 		wma_handle_ndp_initiator_req(wma_handle, msg->bodyptr);
 		qdf_mem_free(msg->bodyptr);
 		break;
+
 	case SIR_HAL_NDP_RESPONDER_REQ:
 		wma_handle_ndp_responder_req(wma_handle, msg->bodyptr);
+		break;
+
+	case SIR_HAL_NDP_END_REQ:
+		wma_handle_ndp_end_req(wma_handle, msg->bodyptr);
 		qdf_mem_free(msg->bodyptr);
 		break;
 	default:

+ 151 - 17
core/wma/src/wma_nan_datapath.c

@@ -283,14 +283,80 @@ send_ndi_responder_fail:
 /**
  * wma_handle_ndp_end_req() - NDP end request handler
  * @wma_handle: wma handle
- * @req_params: request parameters
+ * @ptr: request parameters
  *
  * Return: QDF_STATUS_SUCCESS on success; error number otherwise
  */
-QDF_STATUS wma_handle_ndp_end_req(tp_wma_handle wma_handle,
-				struct ndp_end_req *req_params)
+QDF_STATUS wma_handle_ndp_end_req(tp_wma_handle wma_handle, void *ptr)
 {
+	int ret;
+	uint16_t len;
+	uint32_t ndp_end_req_len, i;
+	wmi_ndp_end_req *ndp_end_req_lst;
+	wmi_buf_t buf;
+	cds_msg_t pe_msg = {0};
+	wmi_ndp_end_req_fixed_param *cmd;
+	struct ndp_end_rsp_event end_rsp = {0};
+	struct ndp_end_req *req = ptr;
+
+	if (NULL == req) {
+		WMA_LOGE(FL("Invalid ndp_end_req"));
+		goto send_ndp_end_fail;
+	}
+
+	/* len of tlv following fixed param  */
+	ndp_end_req_len = sizeof(wmi_ndp_end_req) * req->num_ndp_instances;
+	/* above comes out to 4 byte aligned already, no need of padding */
+	len = sizeof(*cmd) + ndp_end_req_len + WMI_TLV_HDR_SIZE;
+	buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
+	if (!buf) {
+		WMA_LOGE(FL("Malloc failed"));
+		return QDF_STATUS_E_NOMEM;
+	}
+	cmd = (wmi_ndp_end_req_fixed_param *) wmi_buf_data(buf);
+
+	WMITLV_SET_HDR(&cmd->tlv_header,
+		       WMITLV_TAG_STRUC_wmi_ndp_end_req_fixed_param,
+		       WMITLV_GET_STRUCT_TLVLEN(wmi_ndp_end_req_fixed_param));
+
+	cmd->transaction_id = req->transaction_id;
+
+	/* set tlv pointer to end of fixed param */
+	WMITLV_SET_HDR((uint8_t *)&cmd[1], WMITLV_TAG_ARRAY_STRUC,
+			ndp_end_req_len);
+
+	ndp_end_req_lst = (wmi_ndp_end_req *)((uint8_t *)&cmd[1] +
+						WMI_TLV_HDR_SIZE);
+	for (i = 0; i < req->num_ndp_instances; i++) {
+		WMITLV_SET_HDR(&ndp_end_req_lst[i],
+				WMITLV_TAG_ARRAY_FIXED_STRUC,
+				(sizeof(*ndp_end_req_lst) - WMI_TLV_HDR_SIZE));
+
+		ndp_end_req_lst[i].ndp_instance_id = req->ndp_ids[i];
+	}
+
+	WMA_LOGD(FL("Sending WMI_NDP_END_REQ_CMDID to FW"));
+	ret = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
+				   WMI_NDP_END_REQ_CMDID);
+	if (ret < 0) {
+		WMA_LOGE(FL("WMI_NDP_END_REQ_CMDID failed, ret: %d"), ret);
+		wmi_buf_free(buf);
+		goto send_ndp_end_fail;
+	}
 	return QDF_STATUS_SUCCESS;
+
+send_ndp_end_fail:
+	pe_msg.type = SIR_HAL_NDP_END_RSP;
+	if (req) {
+		end_rsp.status = NDP_CMD_RSP_STATUS_ERROR;
+		end_rsp.transaction_id = req->transaction_id;
+		pe_msg.bodyptr = &end_rsp;
+	} else {
+		pe_msg.bodyval = true;
+	}
+
+	wma_handle->pe_ndp_event_handler(wma_handle->mac_context, &pe_msg);
+	return QDF_STATUS_E_FAILURE;
 }
 
 /**
@@ -328,11 +394,24 @@ static int wma_ndp_indication_event_handler(void *handle, uint8_t *event_info,
 	fixed_params =
 		(wmi_ndp_indication_event_fixed_param *)event->fixed_param;
 
-	WMA_LOGD(FL("WMI_NDP_INDICATION_EVENTID(0x%X) received. vdev %d, service_instance %d, ndp_instance %d, role %d, policy %d"),
+	ind_event.vdev_id = fixed_params->vdev_id;
+	ind_event.service_instance_id = fixed_params->service_instance_id;
+	ind_event.ndp_instance_id = fixed_params->ndp_instance_id;
+	ind_event.role = fixed_params->self_ndp_role;
+	ind_event.policy = fixed_params->accept_policy;
+
+	WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_ndi_mac_addr,
+				ind_event.peer_mac_addr.bytes);
+	WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_discovery_mac_addr,
+				ind_event.peer_discovery_mac_addr.bytes);
+
+	WMA_LOGD(FL("WMI_NDP_INDICATION_EVENTID(0x%X) received. vdev %d, service_instance %d, ndp_instance %d, role %d, policy %d, peer_mac_addr: %pM, peer_disc_mac_addr: %pM"),
 		 WMI_NDP_INDICATION_EVENTID, fixed_params->vdev_id,
 		 fixed_params->service_instance_id,
 		 fixed_params->ndp_instance_id, fixed_params->self_ndp_role,
-		 fixed_params->accept_policy);
+		 fixed_params->accept_policy,
+		 ind_event.peer_mac_addr.bytes,
+		 ind_event.peer_discovery_mac_addr.bytes);
 
 	WMA_LOGD(FL("ndp_cfg - %d bytes"), fixed_params->ndp_cfg_len);
 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_DEBUG,
@@ -342,17 +421,6 @@ static int wma_ndp_indication_event_handler(void *handle, uint8_t *event_info,
 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_DEBUG,
 			&event->ndp_app_info, fixed_params->ndp_app_info_len);
 
-	ind_event.vdev_id = fixed_params->vdev_id;
-	ind_event.service_instance_id = fixed_params->service_instance_id;
-	ind_event.ndp_instance_id = fixed_params->ndp_instance_id;
-	ind_event.role = fixed_params->self_ndp_role;
-	ind_event.policy = fixed_params->accept_policy;
-
-	WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_ndi_mac_addr,
-				ind_event.peer_mac_addr.bytes);
-	WMI_MAC_ADDR_TO_CHAR_ARRAY(&fixed_params->peer_discovery_mac_addr,
-				ind_event.peer_discovery_mac_addr.bytes);
-
 	ind_event.ndp_config.ndp_cfg_len = fixed_params->ndp_cfg_len;
 	ind_event.ndp_info.ndp_app_info_len = fixed_params->ndp_app_info_len;
 
@@ -505,7 +573,73 @@ static int wma_ndp_confirm_event_handler(void *handle, uint8_t *event_info,
 static int wma_ndp_end_response_event_handler(void *handle,
 					uint8_t *event_info, uint32_t len)
 {
-	return 0;
+	int ret = 0;
+	QDF_STATUS status;
+	cds_msg_t pe_msg = {0};
+	struct ndp_end_rsp_event *end_rsp;
+	WMI_NDP_END_RSP_EVENTID_param_tlvs *event;
+	wmi_ndp_end_rsp_event_fixed_param *fixed_params = NULL;
+	wmi_ndp_end_rsp_per_ndi *end_rsp_tlv;
+	uint32_t i;
+	uint32_t len_end_rsp;
+	tp_wma_handle wma_handle = handle;
+
+	event = (WMI_NDP_END_RSP_EVENTID_param_tlvs *) event_info;
+	fixed_params = (wmi_ndp_end_rsp_event_fixed_param *)event->fixed_param;
+	WMA_LOGD(FL("WMI_NDP_END_RSP_EVENTID(0x%X) recieved. transaction_id: %d, rsp_status: %d, reason_code: %d"),
+		 WMI_NDP_END_RSP_EVENTID, fixed_params->transaction_id,
+		 fixed_params->rsp_status, fixed_params->reason_code);
+
+	len_end_rsp = sizeof(*end_rsp) + (event->num_ndp_end_rsp_per_ndi_list *
+						sizeof(struct peer_ndp_map));
+	end_rsp = qdf_mem_malloc(len_end_rsp);
+	if (NULL == end_rsp) {
+		WMA_LOGE("malloc failed");
+		pe_msg.bodyval = true;
+		ret = -ENOMEM;
+		goto send_ndp_end_rsp;
+	}
+	pe_msg.bodyptr = end_rsp;
+	qdf_mem_zero(end_rsp, len_end_rsp);
+
+	end_rsp->transaction_id = fixed_params->transaction_id;
+	end_rsp->reason = fixed_params->reason_code;
+	end_rsp->status = fixed_params->rsp_status;
+
+	if (end_rsp->status == NDP_CMD_RSP_STATUS_SUCCESS) {
+		WMA_LOGD(FL("NDP end rsp, num_peers: %d"),
+			event->num_ndp_end_rsp_per_ndi_list);
+		end_rsp->num_peers = event->num_ndp_end_rsp_per_ndi_list;
+		if (end_rsp->num_peers == 0) {
+			WMA_LOGE(FL("num_peers in NDP rsp should not be 0."));
+			end_rsp->status = NDP_CMD_RSP_STATUS_ERROR;
+			goto send_ndp_end_rsp;
+		}
+		/* copy per peer response to return path buffer */
+		end_rsp_tlv = event->ndp_end_rsp_per_ndi_list;
+		for (i = 0; i < end_rsp->num_peers; i++) {
+			end_rsp->ndp_map[i].vdev_id = end_rsp_tlv[i].vdev_id;
+			WMI_MAC_ADDR_TO_CHAR_ARRAY(
+				&end_rsp_tlv[i].peer_mac_addr,
+				end_rsp->ndp_map[i].peer_ndi_mac_addr.bytes);
+			end_rsp->ndp_map[i].num_active_ndp_sessions =
+					end_rsp_tlv[i].num_active_ndps_on_ndi;
+			WMA_LOGD(FL("vdev_id: %d, peer_addr: %pM, active_ndp_left: %d"),
+				end_rsp->ndp_map[i].vdev_id,
+				&end_rsp->ndp_map[i].peer_ndi_mac_addr,
+				end_rsp->ndp_map[i].num_active_ndp_sessions);
+		}
+	}
+
+send_ndp_end_rsp:
+	pe_msg.type = SIR_HAL_NDP_END_RSP;
+	status = wma_handle->pe_ndp_event_handler(wma_handle->mac_context,
+						  &pe_msg);
+	if (!QDF_IS_STATUS_SUCCESS(status))
+		ret = -EINVAL;
+
+	qdf_mem_free(end_rsp);
+	return ret;
 }
 
 /**

+ 7 - 0
core/wma/src/wma_nan_datapath.h

@@ -59,6 +59,7 @@ void wma_ndp_wow_event_callback(void *handle, void *event, uint32_t len);
 void wma_add_bss_ndi_mode(tp_wma_handle wma, tpAddBssParams add_bss);
 void wma_add_sta_ndi_mode(tp_wma_handle wma, tpAddStaParams add_sta);
 QDF_STATUS wma_handle_ndp_initiator_req(tp_wma_handle wma_handle, void *req);
+QDF_STATUS wma_handle_ndp_end_req(tp_wma_handle wma_handle, void *req);
 #else
 #define WMA_IS_VDEV_IN_NDI_MODE(intf, vdev_id) (false)
 static inline void wma_update_hdd_cfg_ndp(tp_wma_handle wma_handle,
@@ -96,5 +97,11 @@ static inline QDF_STATUS wma_handle_ndp_responder_req(tp_wma_handle wma_handle,
 	return QDF_STATUS_SUCCESS;
 }
 
+static inline QDF_STATUS wma_handle_ndp_end_req(tp_wma_handle wma_handle,
+						void *req)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
 #endif /* WLAN_FEATURE_NAN_DATAPATH */
 #endif /* __WMA_NAN_DATAPATH_H */