qca-wifi: Handle VLAN-tagged frames for the multi-passphrase feature

1. Remove the VLAN tag in the tx path and enqueue the frame to hardware.
2. Insert the VLAN tag in the rx path after the peer vlan_id lookup.

Change-Id: I37c34b1d54a497700101e78ad6930b91aa6b43e9
Author: Varsha Mishra
Date:   2019-07-31 20:22:06 +05:30
Parent: b6bd878499
Commit: 52e3f7f1ae


@@ -15,16 +15,23 @@
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#include "../../../cmn_dev/fw_hdr/fw/htt.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "enet.h"
#include "dp_txrx_wds.h"
/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS 1000
#define DP_VLAN_UNTAGGED 0
#define DP_VLAN_TAGGED_MULTICAST 1
#define DP_VLAN_TAGGED_UNICAST 2
#define DP_MAX_VLAN_IDS 4096
static void dp_ast_aging_timer_fn(void *soc_hdl)
{
@@ -375,3 +382,388 @@ int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
return 0;
}
#endif
/**
* dp_tx_add_groupkey_metadata - Add group key in metadata
* @vdev: DP vdev handle
* @msdu_info: MSDU info to be setup in MSDU descriptor
* @group_key: Group key index programmed in metadata
*
* Return: void
*/
#ifdef QCA_MULTIPASS_SUPPORT
static
void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
struct dp_tx_msdu_info_s *msdu_info, uint16_t group_key)
{
struct htt_tx_msdu_desc_ext2_t *meta_data =
(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
/*
 * When sending a multicast packet with multi-passphrase, the host must
 * add the HTT EXT meta data "struct htt_tx_msdu_desc_ext2_t" (see
 * htt.h), programming the group key index into "key_flags" and setting
 * "valid_key_flags" to 1, i.e. key_flags = group_key_ix.
 */
HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info->meta_data[0], 1);
HTT_TX_MSDU_EXT2_DESC_KEY_FLAGS_SET(msdu_info->meta_data[2], group_key);
}
/**
* dp_tx_remove_vlan_tag - Remove 4 bytes of vlan tag
* @vdev: DP vdev handle
* @nbuf: network buffer
*
* Return: void
*/
static
void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
struct vlan_ethhdr veth_hdr;
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)nbuf->data;
/*
* Extract VLAN header of 4 bytes:
* Frame Format : {dst_addr[6], src_addr[6], 802.1Q header[4], EtherType[2], Payload}
* Before Removal : xx xx xx xx xx xx xx xx xx xx xx xx 81 00 00 02 08 00 45 00 00...
* After Removal : xx xx xx xx xx xx xx xx xx xx xx xx 08 00 45 00 00...
*/
qdf_mem_copy(&veth_hdr, veh, sizeof(veth_hdr));
qdf_nbuf_pull_head(nbuf, ETHERTYPE_VLAN_LEN);
veh = (struct vlan_ethhdr *)nbuf->data;
qdf_mem_copy(veh, &veth_hdr, 2 * QDF_MAC_ADDR_SIZE);
return;
}
/**
 * dp_tx_need_multipass_process - check if frame needs multi-passphrase processing
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @buf: network buffer
 * @vlan_id: vlan id of the frame
 *
 * Return: whether the peer is special (vlan-tagged path) or classic (untagged)
 */
static
uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_nbuf_t buf, uint16_t *vlan_id)
{
struct dp_peer *peer = NULL;
qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
struct vlan_ethhdr *veh = NULL;
bool not_vlan = ((vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
(ntohs(eh->ether_type) != ETH_P_8021Q));
if (qdf_unlikely(not_vlan))
return DP_VLAN_UNTAGGED;
veh = (struct vlan_ethhdr *)eh;
*vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
if (qdf_unlikely(DP_FRAME_IS_MULTICAST((eh)->ether_dhost))) {
qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
TAILQ_FOREACH(peer, &vdev->mpass_peer_list,
mpass_peer_list_elem) {
if (*vlan_id == peer->vlan_id) {
qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
return DP_VLAN_TAGGED_MULTICAST;
}
}
qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
return DP_VLAN_UNTAGGED;
}
peer = dp_peer_find_hash_find(soc, eh->ether_dhost, 0, DP_VDEV_ALL);
if (qdf_unlikely(!peer))
return DP_VLAN_UNTAGGED;
/*
* Do not drop the frame when vlan_id doesn't match.
* Send the frame as it is.
*/
if (*vlan_id == peer->vlan_id) {
dp_peer_unref_delete(peer);
return DP_VLAN_TAGGED_UNICAST;
}
dp_peer_unref_delete(peer);
return DP_VLAN_UNTAGGED;
}
/**
* dp_tx_multipass_process - Process vlan frames in tx path
* @soc: dp soc handle
* @vdev: DP vdev handle
* @nbuf: skb
* @msdu_info: msdu descriptor
*
* Return: true if the frame is to be transmitted, false if it must be dropped
*/
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_nbuf_t nbuf,
struct dp_tx_msdu_info_s *msdu_info)
{
uint16_t vlan_id;
qdf_nbuf_t nbuf_copy;
uint16_t group_key;
uint8_t is_spcl_peer = DP_VLAN_UNTAGGED;
is_spcl_peer = dp_tx_need_multipass_process(soc, vdev, nbuf, &vlan_id);
if ((is_spcl_peer != DP_VLAN_TAGGED_MULTICAST) &&
(is_spcl_peer != DP_VLAN_TAGGED_UNICAST))
return true;
if (is_spcl_peer == DP_VLAN_TAGGED_UNICAST) {
dp_tx_remove_vlan_tag(vdev, nbuf);
return true;
}
/* AP can have classic clients, special clients &
* classic repeaters.
* 1. Classic clients & special client:
* Remove vlan header, find corresponding group key
* index, fill in metaheader and enqueue multicast
* frame to TCL.
* 2. Classic repeater:
* Pass through to classic repeater with vlan tag
* intact without any group key index. Hardware
* will know which key to use to send frame to
* repeater.
*/
nbuf_copy = qdf_nbuf_copy(nbuf);
/*
* Send multicast frame to special peers even
* if pass through to classic repeater fails.
*/
if (nbuf_copy && dp_tx_send((struct cdp_vdev *)vdev, nbuf_copy)) {
qdf_nbuf_free(nbuf_copy);
}
group_key = vdev->iv_vlan_map[vlan_id];
/*
* If group key is not installed, drop the frame.
*/
if (!group_key)
return false;
dp_tx_remove_vlan_tag(vdev, nbuf);
dp_tx_add_groupkey_metadata(vdev, msdu_info, group_key);
msdu_info->exception_fw = 1;
return true;
}
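/*
 * Illustrative call-site sketch, not part of this change: the regular tx
 * path is expected to run the multipass check before the MSDU is queued
 * to TCL. The wrapper name below is hypothetical; dp_tx_multipass_process()
 * and qdf_nbuf_free() are the real calls.
 */
static inline qdf_nbuf_t dp_tx_multipass_filter_example(struct dp_soc *soc,
		struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		struct dp_tx_msdu_info_s *msdu_info)
{
	/* A false return means the group key for this vlan is not
	 * installed and the frame has to be dropped.
	 */
	if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info)) {
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	/* The frame is now untagged (unicast case) or carries group key
	 * metadata with exception_fw set (multicast case); continue with
	 * the normal TCL enqueue.
	 */
	return nbuf;
}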
/**
* dp_rx_multipass_process - insert vlan tag on frames for traffic separation
* @peer: DP peer handle
* @nbuf: skb
* @tid: traffic priority
*
* Return: bool: true if tag is inserted else false
*/
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
{
qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
struct vlan_ethhdr vethhdr;
if (qdf_unlikely(!peer->vlan_id))
return false;
if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < ETHERTYPE_VLAN_LEN))
return false;
/*
* Form the VLAN header and insert in nbuf
*/
qdf_mem_copy(vethhdr.h_dest, eh->ether_dhost, QDF_MAC_ADDR_SIZE);
qdf_mem_copy(vethhdr.h_source, eh->ether_shost, QDF_MAC_ADDR_SIZE);
vethhdr.h_vlan_proto = htons(QDF_ETH_TYPE_8021Q);
vethhdr.h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) |
(peer->vlan_id & VLAN_VID_MASK));
/*
* Packet format : DSTMAC | SRCMAC | <VLAN HEADERS TO BE INSERTED> | ETHERTYPE | IP HEADER
* DSTMAC: 6 BYTES
* SRCMAC: 6 BYTES
* VLAN HEADER: 4 BYTES ( TPID | PCP | VLAN ID)
* ETHERTYPE: 2 BYTES
*/
qdf_nbuf_push_head(nbuf, sizeof(struct vlan_hdr));
qdf_mem_copy(qdf_nbuf_data(nbuf), &vethhdr,
sizeof(struct vlan_ethhdr)- ETHERNET_TYPE_LEN);
return true;
}
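/*
 * Illustrative call-site sketch, not part of this change: on a
 * multipass-enabled vdev the rx delivery path can tag every frame with
 * the peer's vlan_id and treat a tagging failure (e.g. no headroom) as
 * a drop. The wrapper name below is hypothetical.
 */
static inline bool dp_rx_multipass_tag_example(struct dp_peer *peer,
		qdf_nbuf_t nbuf, uint8_t tid)
{
	if (!peer->vdev->multipass_en)
		return true;

	/* Insert the 802.1Q header; false means the frame could not be
	 * tagged and should not be delivered untagged.
	 */
	return dp_rx_multipass_process(peer, nbuf, tid);
}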
/**
* dp_peer_multipass_list_remove: remove peer from list
* @peer: pointer to peer
*
* Return: void
*/
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
struct dp_vdev *vdev = peer->vdev;
struct dp_peer *tpeer = NULL;
bool found = 0;
qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
TAILQ_FOREACH(tpeer, &vdev->mpass_peer_list, mpass_peer_list_elem) {
if (tpeer == peer) {
found = 1;
TAILQ_REMOVE(&vdev->mpass_peer_list, peer, mpass_peer_list_elem);
break;
}
}
qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
if (found)
dp_peer_unref_delete(peer);
}
/**
* dp_peer_multipass_list_add: add to new multipass list
* @soc: DP soc handle
* @vdev: DP vdev handle
* @peer_mac: mac address of the peer
*
* Return: void
*/
static void dp_peer_multipass_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
uint8_t *peer_mac)
{
struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 0,
vdev->vdev_id);
if (!peer) {
return;
}
/*
* Ref_cnt is incremented inside dp_peer_find_hash_find().
* Decrement it when element is deleted from the list.
*/
qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
TAILQ_INSERT_HEAD(&vdev->mpass_peer_list, peer, mpass_peer_list_elem);
qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
}
/**
* dp_peer_set_vlan_id: set vlan_id for this peer
* @cdp_soc: soc handle
* @vdev_handle: vdev handle
* @peer_mac: mac address of the peer
* @vlan_id: vlan id for the peer
*
* Return: void
*/
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
struct cdp_vdev *vdev_handle, uint8_t *peer_mac,
uint16_t vlan_id)
{
struct dp_soc *soc = (struct dp_soc *)cdp_soc;
struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
struct dp_peer *peer = NULL;
if (!vdev->multipass_en)
return;
peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev->vdev_id);
if (qdf_unlikely(!peer)) {
qdf_err("NULL peer");
return;
}
peer->vlan_id = vlan_id;
/* Ref_cnt is incremented inside dp_peer_find_hash_find().
* Decrement it here.
*/
dp_peer_unref_delete(peer);
dp_peer_multipass_list_add(soc, vdev, peer_mac);
}
/**
* dp_set_vlan_groupkey: set vlan map for vdev
* @vdev_handle: pointer to vdev
* @vlan_id: vlan_id
* @group_key: group key for vlan
*
* Return: QDF_STATUS_SUCCESS on success, QDF error status on failure
*/
QDF_STATUS dp_set_vlan_groupkey(struct cdp_vdev *vdev_handle,
uint16_t vlan_id, uint16_t group_key)
{
struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
if (!vdev->multipass_en)
return QDF_STATUS_E_INVAL;
if (!vdev->iv_vlan_map) {
uint16_t vlan_map_size = (sizeof(uint16_t))*DP_MAX_VLAN_IDS;
vdev->iv_vlan_map = (uint16_t *)qdf_mem_malloc(vlan_map_size);
if (!vdev->iv_vlan_map) {
QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "iv_vlan_map allocation failed");
return QDF_STATUS_E_NOMEM;
}
/*
* 0 is an invalid group key.
* Initialize the array with invalid group keys.
*/
qdf_mem_zero(vdev->iv_vlan_map, vlan_map_size);
}
if (vlan_id >= DP_MAX_VLAN_IDS)
return QDF_STATUS_E_INVAL;
vdev->iv_vlan_map[vlan_id] = group_key;
return QDF_STATUS_SUCCESS;
}
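/*
 * Illustrative configuration sketch, not part of this change: the
 * control path programs the vlan to group key mapping and the per-peer
 * vlan_id through the two setters above. The wrapper name and the
 * vlan/group key values below are placeholders.
 */
static inline void dp_multipass_config_example(struct cdp_soc_t *cdp_soc,
		struct cdp_vdev *vdev_handle, uint8_t *peer_mac)
{
	/* Map vlan 2 of this vdev to group key index 1 */
	if (dp_set_vlan_groupkey(vdev_handle, 2, 1) != QDF_STATUS_SUCCESS)
		return;

	/* Bind the client identified by peer_mac to vlan 2 */
	dp_peer_set_vlan_id(cdp_soc, vdev_handle, peer_mac, 2);
}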
/**
 * dp_tx_vdev_multipass_deinit: free the vlan map and multipass resources of the vdev
 * @vdev: pointer to vdev
 *
 * Return: void
 */
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
struct dp_peer *peer = NULL;
qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
TAILQ_FOREACH(peer, &vdev->mpass_peer_list, mpass_peer_list_elem)
qdf_err("Peers present in mpass list : %llx",
peer->mac_addr.raw);
qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
if (vdev->iv_vlan_map) {
qdf_mem_free(vdev->iv_vlan_map);
vdev->iv_vlan_map = NULL;
}
qdf_spinlock_destroy(&vdev->mpass_peer_mutex);
}
/**
 * dp_peer_multipass_list_init: initialize peer multipass list
 * @vdev: pointer to vdev
 *
 * Return: void
 */
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
/*
* vdev->iv_vlan_map is allocated when the first configuration command
* is issued to avoid unnecessary allocation for regular mode VAP.
*/
TAILQ_INIT(&vdev->mpass_peer_list);
qdf_spinlock_create(&vdev->mpass_peer_mutex);
}
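/*
 * Illustrative lifecycle sketch, not part of this change: vdev attach
 * and detach are expected to pair the init/deinit helpers above so the
 * multipass peer list and the lazily allocated vlan map follow the vdev
 * lifetime. The wrapper names are hypothetical.
 */
static inline void dp_vdev_multipass_attach_example(struct dp_vdev *vdev)
{
	dp_peer_multipass_list_init(vdev);
}

static inline void dp_vdev_multipass_detach_example(struct dp_vdev *vdev)
{
	dp_tx_vdev_multipass_deinit(vdev);
}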
#endif