Revert "qcacmn: Code movement to enable multipass support without WDS"

This reverts commit Iaafa8dc4f16314d9e3e160fe01251c3684adbf67.

Change-Id: I3e6ec2b2c018c9fc6d0dcdbf9a580c93e23b0458
CRs-Fixed: 3478331
Author: Rakesh Pillai
Date: 2023-04-25 02:14:49 -07:00
Committed by: Madan Koyyalamudi
Parent: 6f3d208ec8
Commit: 8d8312ddf7
6 changed files with 388 additions and 396 deletions


@@ -61,11 +61,6 @@
#endif
#endif
#define DP_MAX_VLAN_IDS 4096
#define DP_VLAN_UNTAGGED 0
#define DP_VLAN_TAGGED_MULTICAST 1
#define DP_VLAN_TAGGED_UNICAST 2
/**
* struct htt_dbgfs_cfg - structure to maintain required htt data
* @msg_word: htt msg sent to upper layer


@@ -12653,75 +12653,3 @@ void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
dp_srng_free(soc, &pdev->rx_refill_buf_ring4);
}
#endif
#ifdef QCA_MULTIPASS_SUPPORT
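/**
 * dp_set_vlan_groupkey() - Map a VLAN id to its multipass group key
 * @soc_hdl: cdp soc handle
 * @vdev_id: id of the vdev on which multipass is enabled
 * @vlan_id: VLAN id to map (0 .. DP_MAX_VLAN_IDS - 1)
 * @group_key: group key index installed for this VLAN
 *
 * Allocates vdev->iv_vlan_map on first use and records the mapping.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */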
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
uint16_t vlan_id, uint16_t group_key)
{
struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
DP_MOD_ID_TX_MULTIPASS);
QDF_STATUS status;
if (!vdev || !vdev->multipass_en) {
status = QDF_STATUS_E_INVAL;
goto fail;
}
if (!vdev->iv_vlan_map) {
uint16_t vlan_map_size = (sizeof(uint16_t)) * DP_MAX_VLAN_IDS;
vdev->iv_vlan_map = (uint16_t *)qdf_mem_malloc(vlan_map_size);
if (!vdev->iv_vlan_map) {
QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "iv_vlan_map");
status = QDF_STATUS_E_NOMEM;
goto fail;
}
/*
* 0 is invalid group key.
* Initialize array with invalid group keys.
*/
qdf_mem_zero(vdev->iv_vlan_map, vlan_map_size);
}
if (vlan_id >= DP_MAX_VLAN_IDS) {
status = QDF_STATUS_E_INVAL;
goto fail;
}
vdev->iv_vlan_map[vlan_id] = group_key;
status = QDF_STATUS_SUCCESS;
fail:
if (vdev)
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
return status;
}
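/**
 * dp_tx_vdev_multipass_deinit() - Tear down multipass state of a vdev
 * @vdev: DP vdev handle
 *
 * Logs any peers still present in the multipass peer list, frees the
 * VLAN-to-group-key map and destroys the multipass peer mutex.
 *
 * Return: void
 */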
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
struct dp_txrx_peer *txrx_peer = NULL;
qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list, mpass_peer_list_elem)
qdf_err("Peers present in mpass list : %d", txrx_peer->peer_id);
qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
if (vdev->iv_vlan_map) {
qdf_mem_free(vdev->iv_vlan_map);
vdev->iv_vlan_map = NULL;
}
qdf_spinlock_destroy(&vdev->mpass_peer_mutex);
}
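/**
 * dp_peer_multipass_list_init() - Initialize the multipass peer list of a vdev
 * @vdev: DP vdev handle
 *
 * Return: void
 */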
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
/*
* vdev->iv_vlan_map is allocated when the first configuration command
* is issued to avoid unnecessary allocation for regular mode VAP.
*/
TAILQ_INIT(&vdev->mpass_peer_list);
qdf_spinlock_create(&vdev->mpass_peer_mutex);
}
#endif /* QCA_MULTIPASS_SUPPORT */


@@ -3850,95 +3850,3 @@ bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
}
qdf_export_symbol(dp_peer_find_by_id_valid);
#ifdef QCA_MULTIPASS_SUPPORT
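/**
 * dp_peer_multipass_list_remove() - Remove a peer from the vdev multipass list
 * @peer: DP peer handle
 *
 * Drops the reference taken when the peer was added to the list.
 *
 * Return: void
 */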
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
struct dp_vdev *vdev = peer->vdev;
struct dp_txrx_peer *tpeer = NULL;
bool found = 0;
qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
TAILQ_FOREACH(tpeer, &vdev->mpass_peer_list, mpass_peer_list_elem) {
if (tpeer == peer->txrx_peer) {
found = 1;
TAILQ_REMOVE(&vdev->mpass_peer_list, peer->txrx_peer,
mpass_peer_list_elem);
break;
}
}
qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
if (found)
dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
}
/**
* dp_peer_multipass_list_add() - add to new multipass list
* @soc: soc handle
* @peer_mac: mac address
* @vdev_id: vdev id for peer
* @vlan_id: vlan_id
*
* return: void
*/
static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
uint8_t vdev_id, uint16_t vlan_id)
{
struct dp_peer *peer =
dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
vdev_id,
DP_MOD_ID_TX_MULTIPASS);
if (qdf_unlikely(!peer)) {
qdf_err("NULL peer");
return;
}
if (qdf_unlikely(!peer->txrx_peer))
goto fail;
/* If peer already exists in vdev multipass list, do not add it.
* This may happen if key install comes twice or re-key
* happens for a peer.
*/
if (peer->txrx_peer->vlan_id) {
dp_debug("peer already added to vdev multipass list"
"MAC: "QDF_MAC_ADDR_FMT" vlan: %d ",
QDF_MAC_ADDR_REF(peer->mac_addr.raw),
peer->txrx_peer->vlan_id);
goto fail;
}
/*
* Ref_cnt is incremented inside dp_peer_find_hash_find().
* Decrement it when element is deleted from the list.
*/
peer->txrx_peer->vlan_id = vlan_id;
qdf_spin_lock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
TAILQ_INSERT_HEAD(&peer->txrx_peer->vdev->mpass_peer_list,
peer->txrx_peer,
mpass_peer_list_elem);
qdf_spin_unlock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
return;
fail:
dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
}
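/**
 * dp_peer_set_vlan_id() - Associate a VLAN id with a peer
 * @cdp_soc: cdp soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: peer mac address
 * @vlan_id: VLAN id to associate with the peer
 *
 * Adds the peer to the vdev multipass list when multipass is enabled.
 *
 * Return: void
 */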
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
uint8_t vdev_id, uint8_t *peer_mac,
uint16_t vlan_id)
{
struct dp_soc *soc = (struct dp_soc *)cdp_soc;
struct dp_vdev *vdev =
dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
DP_MOD_ID_TX_MULTIPASS);
if (vdev && vdev->multipass_en) {
dp_peer_multipass_list_add(soc, peer_mac, vdev_id, vlan_id);
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
}
}
#endif /* QCA_MULTIPASS_SUPPORT */


@@ -42,7 +42,6 @@
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#include "enet.h"
#ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
@@ -3300,33 +3299,3 @@ bool dp_rx_deliver_special_frame(struct dp_soc *soc,
return false;
}
#endif
#ifdef QCA_MULTIPASS_SUPPORT
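/**
 * dp_rx_multipass_process() - Check and update the VLAN tag of an Rx frame
 * @txrx_peer: DP txrx peer handle
 * @nbuf: received frame
 * @tid: traffic id of the frame
 *
 * Rewrites the 802.1Q TCI with the peer's VLAN id and the TID-derived
 * priority; strips the tag for EAPOL frames.
 *
 * Return: false if the frame must be dropped, true otherwise
 */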
bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
uint8_t tid)
{
struct vlan_ethhdr *vethhdrp;
if (qdf_unlikely(!txrx_peer->vlan_id))
return true;
vethhdrp = (struct vlan_ethhdr *)qdf_nbuf_data(nbuf);
/*
* h_vlan_proto and h_vlan_TCI should be 0x8100 and zero respectively,
* since the TCI is expected to be padded with 0.
* Return false if the frame does not carry this tag, so that the
* caller drops the frame.
*/
if (qdf_unlikely(vethhdrp->h_vlan_proto != htons(QDF_ETH_TYPE_8021Q)) ||
qdf_unlikely(vethhdrp->h_vlan_TCI != 0))
return false;
vethhdrp->h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) |
(txrx_peer->vlan_id & VLAN_VID_MASK));
if (vethhdrp->h_vlan_encapsulated_proto == htons(ETHERTYPE_PAE))
dp_tx_remove_vlan_tag(txrx_peer->vdev, nbuf);
return true;
}
#endif /* QCA_MULTIPASS_SUPPORT */


@@ -6660,199 +6660,3 @@ void dp_pkt_get_timestamp(uint64_t *time)
}
#endif
#ifdef QCA_MULTIPASS_SUPPORT
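/**
 * dp_tx_add_groupkey_metadata() - Add group key index to HTT Tx metadata
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info of the frame being transmitted
 * @group_key: group key index to place in the HTT EXT descriptor
 *
 * Return: void
 */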
void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
struct dp_tx_msdu_info_s *msdu_info,
uint16_t group_key)
{
struct htt_tx_msdu_desc_ext2_t *meta_data =
(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
/*
* When sending a multicast packet with multi-passphrase, the host
* shall add the HTT EXT metadata "struct htt_tx_msdu_desc_ext2_t"
* (see htt.h), setting the group key index in "key_flags" and
* "valid_key_flags" to 1, i.e. "key_flags = group_key_ix".
*/
HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info->meta_data[0],
1);
HTT_TX_MSDU_EXT2_DESC_KEY_FLAGS_SET(msdu_info->meta_data[2], group_key);
}
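/**
 * dp_tx_remove_vlan_tag() - Strip the 4-byte 802.1Q header from a frame
 * @vdev: DP vdev handle
 * @nbuf: frame to modify in place
 *
 * Preserves the destination and source MAC addresses while removing
 * the VLAN tag.
 *
 * Return: void
 */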
void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
struct vlan_ethhdr veth_hdr;
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)nbuf->data;
/*
* Extract VLAN header of 4 bytes:
* Frame Format : {dst_addr[6], src_addr[6], 802.1Q header[4],
* EtherType[2], Payload}
* Before Removal : xx xx xx xx xx xx xx xx xx xx xx xx 81 00 00 02
* 08 00 45 00 00...
* After Removal : xx xx xx xx xx xx xx xx xx xx xx xx 08 00 45 00
* 00...
*/
qdf_mem_copy(&veth_hdr, veh, sizeof(veth_hdr));
qdf_nbuf_pull_head(nbuf, ETHERTYPE_VLAN_LEN);
veh = (struct vlan_ethhdr *)nbuf->data;
qdf_mem_copy(veh, &veth_hdr, 2 * QDF_MAC_ADDR_SIZE);
}
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
defined(WLAN_MCAST_MLO)
/**
* dp_tx_need_mcast_reinject() - If frame needs to be processed in reinject path
* @vdev: DP vdev handle
*
* Return: true if reinject handling is required else false
*/
static inline bool
dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
{
if (vdev->mlo_vdev && vdev->opmode == wlan_op_mode_ap)
return true;
return false;
}
#else
static inline bool
dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
{
return false;
}
#endif
/**
* dp_tx_need_multipass_process() - If frame needs multipass passphrase processing
* @soc: dp soc handle
* @vdev: DP vdev handle
* @buf: frame
* @vlan_id: vlan id of frame
*
* Return: whether peer is special or classic
*/
static
uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_nbuf_t buf, uint16_t *vlan_id)
{
struct dp_txrx_peer *txrx_peer = NULL;
struct dp_peer *peer = NULL;
qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
struct vlan_ethhdr *veh = NULL;
bool not_vlan = ((vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
(htons(eh->ether_type) != ETH_P_8021Q));
if (qdf_unlikely(not_vlan))
return DP_VLAN_UNTAGGED;
veh = (struct vlan_ethhdr *)eh;
*vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
if (qdf_unlikely(DP_FRAME_IS_MULTICAST((eh)->ether_dhost))) {
/* look for handling of multicast packets in reinject path */
if (dp_tx_need_mcast_reinject(vdev))
return DP_VLAN_UNTAGGED;
qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list,
mpass_peer_list_elem) {
if (*vlan_id == txrx_peer->vlan_id) {
qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
return DP_VLAN_TAGGED_MULTICAST;
}
}
qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
return DP_VLAN_UNTAGGED;
}
peer = dp_peer_find_hash_find(soc, eh->ether_dhost, 0, DP_VDEV_ALL,
DP_MOD_ID_TX_MULTIPASS);
if (qdf_unlikely(!peer))
return DP_VLAN_UNTAGGED;
/*
* Do not drop the frame when vlan_id doesn't match.
* Send the frame as it is.
*/
if (*vlan_id == peer->txrx_peer->vlan_id) {
dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
return DP_VLAN_TAGGED_UNICAST;
}
dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
return DP_VLAN_UNTAGGED;
}
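/**
 * dp_tx_multipass_process() - Classify and process a Tx frame for multipass
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @nbuf: frame to transmit
 * @msdu_info: MSDU info of the frame
 *
 * Strips the VLAN tag for tagged unicast frames; for tagged multicast
 * frames, sends a copy towards classic repeaters and adds the group key
 * metadata to the original frame.
 *
 * Return: false if the frame must be dropped, true otherwise
 */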
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_nbuf_t nbuf,
struct dp_tx_msdu_info_s *msdu_info)
{
uint16_t vlan_id = 0;
uint16_t group_key = 0;
uint8_t is_spcl_peer = DP_VLAN_UNTAGGED;
qdf_nbuf_t nbuf_copy = NULL;
if (HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->meta_data[0]))
return true;
is_spcl_peer = dp_tx_need_multipass_process(soc, vdev, nbuf, &vlan_id);
if ((is_spcl_peer != DP_VLAN_TAGGED_MULTICAST) &&
(is_spcl_peer != DP_VLAN_TAGGED_UNICAST))
return true;
if (is_spcl_peer == DP_VLAN_TAGGED_UNICAST) {
dp_tx_remove_vlan_tag(vdev, nbuf);
return true;
}
/* AP can have classic clients, special clients &
* classic repeaters.
* 1. Classic clients & special client:
* Remove vlan header, find corresponding group key
* index, fill in metaheader and enqueue multicast
* frame to TCL.
* 2. Classic repeater:
* Pass through to classic repeater with vlan tag
* intact without any group key index. Hardware
* will know which key to use to send frame to
* repeater.
*/
nbuf_copy = qdf_nbuf_copy(nbuf);
/*
* Send multicast frame to special peers even
* if pass through to classic repeater fails.
*/
if (nbuf_copy) {
struct dp_tx_msdu_info_s msdu_info_copy;
qdf_mem_zero(&msdu_info_copy, sizeof(msdu_info_copy));
msdu_info_copy.tid = HTT_TX_EXT_TID_INVALID;
HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info_copy.meta_data[0], 1);
nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
&msdu_info_copy,
HTT_INVALID_PEER, NULL);
if (nbuf_copy) {
qdf_nbuf_free(nbuf_copy);
qdf_err("nbuf_copy send failed");
}
}
group_key = vdev->iv_vlan_map[vlan_id];
/*
* If group key is not installed, drop the frame.
*/
if (!group_key)
return false;
dp_tx_remove_vlan_tag(vdev, nbuf);
dp_tx_add_groupkey_metadata(vdev, msdu_info, group_key);
msdu_info->exception_fw = 1;
return true;
}
#endif /* QCA_MULTIPASS_SUPPORT */


@@ -32,6 +32,10 @@
/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS 5000
#define DP_VLAN_UNTAGGED 0
#define DP_VLAN_TAGGED_MULTICAST 1
#define DP_VLAN_TAGGED_UNICAST 2
#define DP_MAX_VLAN_IDS 4096
#define DP_INVALID_AST_IDX 0xffff
#define DP_INVALID_FLOW_PRIORITY 0xff
#define DP_PEER_AST0_FLOW_MASK 0x4
@@ -545,8 +549,392 @@ int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
}
#endif
#ifdef QCA_MULTIPASS_SUPPORT
void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
struct dp_tx_msdu_info_s *msdu_info, uint16_t group_key)
{
struct htt_tx_msdu_desc_ext2_t *meta_data =
(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
/*
* When sending a multicast packet with multi-passphrase, the host
* shall add the HTT EXT metadata "struct htt_tx_msdu_desc_ext2_t"
* (see htt.h), setting the group key index in "key_flags" and
* "valid_key_flags" to 1, i.e. "key_flags = group_key_ix".
*/
HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info->meta_data[0], 1);
HTT_TX_MSDU_EXT2_DESC_KEY_FLAGS_SET(msdu_info->meta_data[2], group_key);
}
void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
struct vlan_ethhdr veth_hdr;
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)nbuf->data;
/*
* Extract VLAN header of 4 bytes:
* Frame Format : {dst_addr[6], src_addr[6], 802.1Q header[4], EtherType[2], Payload}
* Before Removal : xx xx xx xx xx xx xx xx xx xx xx xx 81 00 00 02 08 00 45 00 00...
* After Removal : xx xx xx xx xx xx xx xx xx xx xx xx 08 00 45 00 00...
*/
qdf_mem_copy(&veth_hdr, veh, sizeof(veth_hdr));
qdf_nbuf_pull_head(nbuf, ETHERTYPE_VLAN_LEN);
veh = (struct vlan_ethhdr *)nbuf->data;
qdf_mem_copy(veh, &veth_hdr, 2 * QDF_MAC_ADDR_SIZE);
return;
}
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
defined(WLAN_MCAST_MLO)
/**
* dp_tx_need_mcast_reinject() - If frame needs to be processed in reinject path
* @vdev: DP vdev handle
*
* Return: true if reinject handling is required else false
*/
static inline bool
dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
{
if (vdev->mlo_vdev && vdev->opmode == wlan_op_mode_ap)
return true;
return false;
}
#else
static inline bool
dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
{
return false;
}
#endif
/**
* dp_tx_need_multipass_process() - If frame needs multipass passphrase processing
* @soc: dp soc handle
* @vdev: DP vdev handle
* @buf: frame
* @vlan_id: vlan id of frame
*
* Return: whether peer is special or classic
*/
static
uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_nbuf_t buf, uint16_t *vlan_id)
{
struct dp_txrx_peer *txrx_peer = NULL;
struct dp_peer *peer = NULL;
qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
struct vlan_ethhdr *veh = NULL;
bool not_vlan = ((vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
(htons(eh->ether_type) != ETH_P_8021Q));
if (qdf_unlikely(not_vlan))
return DP_VLAN_UNTAGGED;
veh = (struct vlan_ethhdr *)eh;
*vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
if (qdf_unlikely(DP_FRAME_IS_MULTICAST((eh)->ether_dhost))) {
/* look for handling of multicast packets in reinject path */
if (dp_tx_need_mcast_reinject(vdev))
return DP_VLAN_UNTAGGED;
qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list,
mpass_peer_list_elem) {
if (*vlan_id == txrx_peer->vlan_id) {
qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
return DP_VLAN_TAGGED_MULTICAST;
}
}
qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
return DP_VLAN_UNTAGGED;
}
peer = dp_peer_find_hash_find(soc, eh->ether_dhost, 0, DP_VDEV_ALL,
DP_MOD_ID_TX_MULTIPASS);
if (qdf_unlikely(!peer))
return DP_VLAN_UNTAGGED;
/*
* Do not drop the frame when vlan_id doesn't match.
* Send the frame as it is.
*/
if (*vlan_id == peer->txrx_peer->vlan_id) {
dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
return DP_VLAN_TAGGED_UNICAST;
}
dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
return DP_VLAN_UNTAGGED;
}
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_nbuf_t nbuf,
struct dp_tx_msdu_info_s *msdu_info)
{
uint16_t vlan_id = 0;
uint16_t group_key = 0;
uint8_t is_spcl_peer = DP_VLAN_UNTAGGED;
qdf_nbuf_t nbuf_copy = NULL;
if (HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->meta_data[0])) {
return true;
}
is_spcl_peer = dp_tx_need_multipass_process(soc, vdev, nbuf, &vlan_id);
if ((is_spcl_peer != DP_VLAN_TAGGED_MULTICAST) &&
(is_spcl_peer != DP_VLAN_TAGGED_UNICAST))
return true;
if (is_spcl_peer == DP_VLAN_TAGGED_UNICAST) {
dp_tx_remove_vlan_tag(vdev, nbuf);
return true;
}
/* AP can have classic clients, special clients &
* classic repeaters.
* 1. Classic clients & special client:
* Remove vlan header, find corresponding group key
* index, fill in metaheader and enqueue multicast
* frame to TCL.
* 2. Classic repeater:
* Pass through to classic repeater with vlan tag
* intact without any group key index. Hardware
* will know which key to use to send frame to
* repeater.
*/
nbuf_copy = qdf_nbuf_copy(nbuf);
/*
* Send multicast frame to special peers even
* if pass through to classic repeater fails.
*/
if (nbuf_copy) {
struct dp_tx_msdu_info_s msdu_info_copy;
qdf_mem_zero(&msdu_info_copy, sizeof(msdu_info_copy));
msdu_info_copy.tid = HTT_TX_EXT_TID_INVALID;
HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info_copy.meta_data[0], 1);
nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy, &msdu_info_copy, HTT_INVALID_PEER, NULL);
if (nbuf_copy) {
qdf_nbuf_free(nbuf_copy);
qdf_err("nbuf_copy send failed");
}
}
group_key = vdev->iv_vlan_map[vlan_id];
/*
* If group key is not installed, drop the frame.
*/
if (!group_key)
return false;
dp_tx_remove_vlan_tag(vdev, nbuf);
dp_tx_add_groupkey_metadata(vdev, msdu_info, group_key);
msdu_info->exception_fw = 1;
return true;
}
bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
uint8_t tid)
{
struct vlan_ethhdr *vethhdrp;
if (qdf_unlikely(!txrx_peer->vlan_id))
return true;
vethhdrp = (struct vlan_ethhdr *)qdf_nbuf_data(nbuf);
/*
* h_vlan_proto and h_vlan_TCI should be 0x8100 and zero respectively,
* since the TCI is expected to be padded with 0.
* Return false if the frame does not carry this tag, so that the
* caller drops the frame.
*/
if (qdf_unlikely(vethhdrp->h_vlan_proto != htons(QDF_ETH_TYPE_8021Q)) ||
qdf_unlikely(vethhdrp->h_vlan_TCI != 0))
return false;
vethhdrp->h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) |
(txrx_peer->vlan_id & VLAN_VID_MASK));
if (vethhdrp->h_vlan_encapsulated_proto == htons(ETHERTYPE_PAE))
dp_tx_remove_vlan_tag(txrx_peer->vdev, nbuf);
return true;
}
#endif /* QCA_MULTIPASS_SUPPORT */
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
#ifdef QCA_MULTIPASS_SUPPORT
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
struct dp_vdev *vdev = peer->vdev;
struct dp_txrx_peer *tpeer = NULL;
bool found = 0;
qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
TAILQ_FOREACH(tpeer, &vdev->mpass_peer_list, mpass_peer_list_elem) {
if (tpeer == peer->txrx_peer) {
found = 1;
TAILQ_REMOVE(&vdev->mpass_peer_list, peer->txrx_peer,
mpass_peer_list_elem);
break;
}
}
qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
if (found)
dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
}
/**
* dp_peer_multipass_list_add() - add to new multipass list
* @soc: soc handle
* @peer_mac: mac address
* @vdev_id: vdev id for peer
* @vlan_id: vlan_id
*
* return: void
*/
static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
uint8_t vdev_id, uint16_t vlan_id)
{
struct dp_peer *peer =
dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
vdev_id,
DP_MOD_ID_TX_MULTIPASS);
if (qdf_unlikely(!peer)) {
qdf_err("NULL peer");
return;
}
if (qdf_unlikely(!peer->txrx_peer))
goto fail;
/* If peer already exists in vdev multipass list, do not add it.
* This may happen if key install comes twice or re-key
* happens for a peer.
*/
if (peer->txrx_peer->vlan_id) {
dp_debug("peer already added to vdev multipass list"
"MAC: "QDF_MAC_ADDR_FMT" vlan: %d ",
QDF_MAC_ADDR_REF(peer->mac_addr.raw),
peer->txrx_peer->vlan_id);
goto fail;
}
/*
* Ref_cnt is incremented inside dp_peer_find_hash_find().
* Decrement it when element is deleted from the list.
*/
peer->txrx_peer->vlan_id = vlan_id;
qdf_spin_lock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
TAILQ_INSERT_HEAD(&peer->txrx_peer->vdev->mpass_peer_list,
peer->txrx_peer,
mpass_peer_list_elem);
qdf_spin_unlock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
return;
fail:
dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
return;
}
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
uint8_t vdev_id, uint8_t *peer_mac,
uint16_t vlan_id)
{
struct dp_soc *soc = (struct dp_soc *)cdp_soc;
struct dp_vdev *vdev =
dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
DP_MOD_ID_TX_MULTIPASS);
if (vdev && vdev->multipass_en) {
dp_peer_multipass_list_add(soc, peer_mac, vdev_id, vlan_id);
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
}
}
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
uint16_t vlan_id, uint16_t group_key)
{
struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
DP_MOD_ID_TX_MULTIPASS);
QDF_STATUS status;
if (!vdev || !vdev->multipass_en) {
status = QDF_STATUS_E_INVAL;
goto fail;
}
if (!vdev->iv_vlan_map) {
uint16_t vlan_map_size = (sizeof(uint16_t)) * DP_MAX_VLAN_IDS;
vdev->iv_vlan_map = (uint16_t *)qdf_mem_malloc(vlan_map_size);
if (!vdev->iv_vlan_map) {
QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "iv_vlan_map");
status = QDF_STATUS_E_NOMEM;
goto fail;
}
/*
* 0 is invalid group key.
* Initialize array with invalid group keys.
*/
qdf_mem_zero(vdev->iv_vlan_map, vlan_map_size);
}
if (vlan_id >= DP_MAX_VLAN_IDS) {
status = QDF_STATUS_E_INVAL;
goto fail;
}
vdev->iv_vlan_map[vlan_id] = group_key;
status = QDF_STATUS_SUCCESS;
fail:
if (vdev)
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
return status;
}
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
struct dp_txrx_peer *txrx_peer = NULL;
qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list, mpass_peer_list_elem)
qdf_err("Peers present in mpass list : %d", txrx_peer->peer_id);
qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
if (vdev->iv_vlan_map) {
qdf_mem_free(vdev->iv_vlan_map);
vdev->iv_vlan_map = NULL;
}
qdf_spinlock_destroy(&vdev->mpass_peer_mutex);
}
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
/*
* vdev->iv_vlan_map is allocated when the first configuration command
* is issued to avoid unnecessary allocation for regular mode VAP.
*/
TAILQ_INIT(&vdev->mpass_peer_list);
qdf_spinlock_create(&vdev->mpass_peer_mutex);
}
#endif /* QCA_MULTIPASS_SUPPORT */
#ifdef QCA_PEER_MULTIQ_SUPPORT
void dp_peer_reset_flowq_map(struct dp_peer *peer)