qcacmn: REO queue ref enhancement for Waikiki

In WIN BE chipsets, replace REO tid queue programming
in FW via WMI with writes to a host-managed table shared
by HW and SW. REO HW picks up the tid queue address from
this table, indexed by peer id and tid number.

Change-Id: I8107ca5116425538329b11ae3519f02b32573bac
Author: Sumedh Baikady
Date: 2021-11-01 16:39:34 -07:00
Committer: Madan Koyyalamudi
Parent: e54c2ef953
Commit: e4d9b0c2d7
17 files changed, 623 insertions(+), 112 deletions(-)
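
For orientation, a minimal sketch of the host-managed qref table write that this change introduces, assuming the layout visible in the diff below (17 tids per peer, one 8-byte rx_reo_queue_reference entry per peer/tid pair, MLO peer ids starting at 0x2000). The struct and helper names here are illustrative stand-ins, not the driver's API:

    #include <stdint.h>

    #define MAX_TIDS          17       /* tids 0..15 plus non-QoS tid 16 */
    #define ML_PEER_ID_START  0x2000   /* MLO peer ids carry this bit */

    /* Illustrative 8-byte entry mirroring rx_reo_queue_reference:
     * 40-bit REO queue descriptor address plus receive queue number.
     */
    struct qref_entry {
        uint32_t addr_31_0;
        uint32_t addr_39_32   : 8;
        uint32_t queue_number : 16;
        uint32_t reserved     : 8;
    };

    /* Write one LUT entry: index = (peer_id [- MLO offset]) * MAX_TIDS + tid */
    static void qref_write(struct qref_entry *non_mlo_lut,
                           struct qref_entry *mlo_lut,
                           uint16_t peer_id, int tid, uint64_t hw_qdesc_paddr)
    {
        int is_mlo = !!(peer_id & ML_PEER_ID_START);
        struct qref_entry *lut = is_mlo ? mlo_lut : non_mlo_lut;
        uint32_t idx = (is_mlo ? peer_id - ML_PEER_ID_START : peer_id) *
                       MAX_TIDS + tid;

        lut[idx].addr_31_0 = hw_qdesc_paddr & 0xffffffff;
        lut[idx].addr_39_32 = (hw_qdesc_paddr >> 32) & 0xff;
        lut[idx].queue_number = hw_qdesc_paddr ? tid : 0;
    }

REO HW reads the same table when it needs a per-tid queue descriptor, so updating a queue address becomes a host memory write instead of a WMI command to FW.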


@@ -1531,6 +1531,8 @@ void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
arch_ops->mlo_peer_find_hash_remove = dp_mlo_peer_find_hash_remove_be;
arch_ops->mlo_peer_find_hash_find = dp_mlo_peer_find_hash_find_be;
#endif
arch_ops->dp_peer_rx_reorder_queue_setup =
dp_peer_rx_reorder_queue_setup_be;
arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_be;
dp_init_near_full_arch_ops_be(arch_ops);
}


@@ -22,6 +22,7 @@
#include <dp_types.h>
#include "dp_be.h"
#include "dp_peer.h"
/*
* dp_be_intrabss_params
@@ -224,6 +225,135 @@ bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
struct dp_vdev *vdev,
struct dp_peer *peer,
qdf_nbuf_t nbuf);
#endif
/**
* dp_peer_rx_reorder_queue_setup_be() - Send reo queue setup wmi cmd to FW
* per peer type, or write the queue addr to the REO qref LUT on targets
* that use the host-managed shared table
* @soc: DP Soc handle
* @peer: dp peer to operate on
* @tid: TID
* @ba_window_size: BlockAck window size
*
* Return: 0 - success, others - failure
*/
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
struct dp_peer *peer,
int tid,
uint32_t ba_window_size)
{
uint8_t i;
struct dp_mld_link_peers link_peers_info;
struct dp_peer *link_peer;
struct dp_rx_tid *rx_tid;
struct dp_soc *link_peer_soc;
rx_tid = &peer->rx_tid[tid];
if (!rx_tid->hw_qdesc_paddr)
return QDF_STATUS_E_INVAL;
if (!hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
if (IS_MLO_DP_MLD_PEER(peer)) {
/* get link peers with reference */
dp_get_link_peers_ref_from_mld_peer(soc, peer,
&link_peers_info,
DP_MOD_ID_CDP);
/* send WMI cmd to each link peers */
for (i = 0; i < link_peers_info.num_links; i++) {
link_peer = link_peers_info.link_peers[i];
link_peer_soc = link_peer->vdev->pdev->soc;
if (link_peer_soc->cdp_soc.ol_ops->
peer_rx_reorder_queue_setup) {
if (link_peer_soc->cdp_soc.ol_ops->
peer_rx_reorder_queue_setup(
link_peer_soc->ctrl_psoc,
link_peer->vdev->pdev->pdev_id,
link_peer->vdev->vdev_id,
link_peer->mac_addr.raw,
rx_tid->hw_qdesc_paddr,
tid, tid,
1, ba_window_size)) {
dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
link_peer_soc, tid);
return QDF_STATUS_E_FAILURE;
}
}
}
/* release link peers reference */
dp_release_link_peers_ref(&link_peers_info,
DP_MOD_ID_CDP);
} else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
if (soc->cdp_soc.ol_ops->
peer_rx_reorder_queue_setup(
soc->ctrl_psoc,
peer->vdev->pdev->pdev_id,
peer->vdev->vdev_id,
peer->mac_addr.raw,
rx_tid->hw_qdesc_paddr,
tid, tid,
1, ba_window_size)) {
dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
soc, tid);
return QDF_STATUS_E_FAILURE;
}
}
} else {
dp_peer_err("invalid peer type %d", peer->peer_type);
return QDF_STATUS_E_FAILURE;
}
} else {
/* Some BE targets don't require WMI and instead use a shared
* table managed by the host for storing REO queue ref structs
*/
if (IS_MLO_DP_LINK_PEER(peer) ||
peer->peer_id == HTT_INVALID_PEER) {
/* Return for MLD link peers: the table is not used in
* that case since the MLD peer's qref is written to the
* LUT in peer setup or peer map. Also return if the peer
* id is not yet assigned, which happens when peer setup
* for a link peer runs before the host receives the HTT
* peer map. In both cases return success as a no-op and
* let the peer map handler write the reo_qref to the LUT.
*/
dp_peer_debug("Invalid peer id for dp_peer:%pK", peer);
return QDF_STATUS_SUCCESS;
}
hal_reo_shared_qaddr_write(soc->hal_soc,
peer->peer_id,
tid, peer->rx_tid[tid].hw_qdesc_paddr);
}
return QDF_STATUS_SUCCESS;
}
#else
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
struct dp_peer *peer,
int tid,
uint32_t ba_window_size)
{
struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
if (!rx_tid->hw_qdesc_paddr)
return QDF_STATUS_E_INVAL;
if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
soc->ctrl_psoc,
peer->vdev->pdev->pdev_id,
peer->vdev->vdev_id,
peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
1, ba_window_size)) {
dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
soc, tid);
return QDF_STATUS_E_FAILURE;
}
}
return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_FEATURE_11BE_MLO */
#endif


@@ -7379,6 +7379,7 @@ dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
struct dp_peer *peer =
dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
DP_MOD_ID_CDP);
struct dp_peer *mld_peer = NULL;
enum wlan_op_mode vdev_opmode;
uint8_t lmac_peer_id_msb = 0;
@@ -7441,8 +7442,23 @@ dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
qdf_assert_always(0);
}
if (vdev_opmode != wlan_op_mode_monitor)
if (vdev_opmode != wlan_op_mode_monitor) {
/* In case of MLD peer, switch peer to mld peer and
* do peer_rx_init.
*/
if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
IS_MLO_DP_LINK_PEER(peer)) {
if (setup_info && setup_info->is_first_link) {
mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
if (mld_peer)
dp_peer_rx_init(pdev, mld_peer);
else
dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
}
} else {
dp_peer_rx_init(pdev, peer);
}
}
dp_peer_ppdu_delayed_ba_init(peer);


@@ -2479,6 +2479,22 @@ void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
rx_tid->pn_size);
}
#ifdef REO_SHARED_QREF_TABLE_EN
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
struct dp_peer *peer)
{
uint8_t tid;
if (IS_MLO_DP_LINK_PEER(peer))
return;
if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
for (tid = 0; tid < DP_MAX_TIDS; tid++)
hal_reo_shared_qaddr_write(soc->hal_soc,
peer->peer_id, tid, 0);
}
}
#endif
/*
* dp_peer_find_add_id() - map peer_id with peer
* @soc: soc handle
@@ -2569,6 +2585,7 @@ dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
QDF_STATUS err = QDF_STATUS_SUCCESS;
struct dp_soc *primary_soc;
dp_info("mlo_peer_map_event (soc:%pK): peer_id %d ml_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT,
soc, peer_id, ml_peer_id,
@@ -2606,6 +2623,28 @@ dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
peer_mac_addr,
type, 0);
}
/* If peer setup, and hence rx_tid setup, was called
* before the HTT peer map, the Qref write to the LUT did
* not happen in rx_tid setup because the peer_id was
* invalid, so defer it to the peer map handler: if the
* rx_tid qdesc for tid 0 is already set up, write the
* qref to the LUT for tid 0 and 16.
*
* The peer map may be received on the assoc link, hence
* switch to the primary link's soc.
*/
primary_soc = peer->vdev->pdev->soc;
if (hal_reo_shared_qaddr_is_enable(primary_soc->hal_soc) &&
peer->rx_tid[0].hw_qdesc_vaddr_unaligned) {
hal_reo_shared_qaddr_write(primary_soc->hal_soc,
ml_peer_id,
0,
peer->rx_tid[0].hw_qdesc_paddr);
hal_reo_shared_qaddr_write(primary_soc->hal_soc,
ml_peer_id,
DP_NON_QOS_TID,
peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
}
}
err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
@@ -2706,7 +2745,28 @@ dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
peer_mac_addr,
type, 0);
}
/* If peer setup, and hence rx_tid setup, was called
* before the HTT peer map, the Qref write to the LUT did
* not happen in rx_tid setup because the peer_id was
* invalid, so defer it to the peer map handler: if the
* rx_tid qdesc for tid 0 is already set up, write the
* qref to the LUT for tid 0 and 16.
*/
if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
peer->rx_tid[0].hw_qdesc_vaddr_unaligned &&
!IS_MLO_DP_LINK_PEER(peer)) {
hal_reo_shared_qaddr_write(soc->hal_soc,
peer_id,
0,
peer->rx_tid[0].hw_qdesc_paddr);
hal_reo_shared_qaddr_write(soc->hal_soc,
peer_id,
DP_NON_QOS_TID,
peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
}
}
err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
vdev_id, ast_hash, is_wds);
}
@@ -2774,6 +2834,14 @@ dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
soc, peer_id, peer);
/* Clear entries in Qref LUT */
/* TODO: Check whether this should instead be called from
* dp_peer_delete for the MLO case, in case there is a race
* between new peer id assignment and a still-pending peer
* unmap for an MLD peer with the same peer id.
*/
dp_peer_rx_reo_shared_qaddr_delete(soc, peer);
dp_peer_find_id_to_obj_remove(soc, peer_id);
dp_mlo_partner_chips_unmap(soc, peer_id);
peer->peer_id = HTT_INVALID_PEER;
@@ -2914,83 +2982,6 @@ bool dp_rx_tid_update_allow(struct dp_peer *peer)
return true;
}
/**
* dp_peer_rx_reorder_queue_setup() - Send reo queue setup wmi cmd to FW
per peer type
* @soc: DP Soc handle
* @peer: dp peer to operate on
* @tid: TID
* @ba_window_size: BlockAck window size
*
* Return: 0 - success, others - failure
*/
static QDF_STATUS dp_peer_rx_reorder_queue_setup(struct dp_soc *soc,
struct dp_peer *peer,
int tid,
uint32_t ba_window_size)
{
uint8_t i;
struct dp_mld_link_peers link_peers_info;
struct dp_peer *link_peer;
struct dp_rx_tid *rx_tid;
struct dp_soc *link_peer_soc;
rx_tid = &peer->rx_tid[tid];
if (!rx_tid->hw_qdesc_paddr)
return QDF_STATUS_E_INVAL;
if (IS_MLO_DP_MLD_PEER(peer)) {
/* get link peers with reference */
dp_get_link_peers_ref_from_mld_peer(soc, peer,
&link_peers_info,
DP_MOD_ID_CDP);
/* send WMI cmd to each link peers */
for (i = 0; i < link_peers_info.num_links; i++) {
link_peer = link_peers_info.link_peers[i];
link_peer_soc = link_peer->vdev->pdev->soc;
if (link_peer_soc->cdp_soc.ol_ops->
peer_rx_reorder_queue_setup) {
if (link_peer_soc->cdp_soc.ol_ops->
peer_rx_reorder_queue_setup(
link_peer_soc->ctrl_psoc,
link_peer->vdev->pdev->pdev_id,
link_peer->vdev->vdev_id,
link_peer->mac_addr.raw,
rx_tid->hw_qdesc_paddr,
tid, tid,
1, ba_window_size)) {
dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
link_peer_soc, tid);
return QDF_STATUS_E_FAILURE;
}
}
}
/* release link peers reference */
dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
} else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
if (soc->cdp_soc.ol_ops->
peer_rx_reorder_queue_setup(
soc->ctrl_psoc,
peer->vdev->pdev->pdev_id,
peer->vdev->vdev_id,
peer->mac_addr.raw,
rx_tid->hw_qdesc_paddr,
tid, tid,
1, ba_window_size)) {
dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
soc, tid);
return QDF_STATUS_E_FAILURE;
}
}
} else {
dp_peer_err("invalid peer type %d", peer->peer_type);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
}
#else
static inline
bool dp_rx_tid_setup_allow(struct dp_peer *peer)
@@ -3003,32 +2994,6 @@ bool dp_rx_tid_update_allow(struct dp_peer *peer)
{
return true;
}
static QDF_STATUS dp_peer_rx_reorder_queue_setup(struct dp_soc *soc,
struct dp_peer *peer,
int tid,
uint32_t ba_window_size)
{
struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
if (!rx_tid->hw_qdesc_paddr)
return QDF_STATUS_E_INVAL;
if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
soc->ctrl_psoc,
peer->vdev->pdev->pdev_id,
peer->vdev->vdev_id,
peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
1, ba_window_size)) {
dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
soc, tid);
return QDF_STATUS_E_FAILURE;
}
}
return QDF_STATUS_SUCCESS;
}
#endif
QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
@@ -3753,11 +3718,10 @@ static void dp_peer_rx_tids_init(struct dp_peer *peer)
int tid;
struct dp_rx_tid *rx_tid;
/* if not first assoc link peer or MLD peer,
/* if not first assoc link peer,
* not to initialize rx_tids again.
*/
if ((IS_MLO_DP_LINK_PEER(peer) && !peer->first_link) ||
IS_MLO_DP_MLD_PEER(peer))
if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
return;
for (tid = 0; tid < DP_MAX_TIDS; tid++) {


@@ -1077,6 +1077,9 @@ void dp_peer_delete(struct dp_soc *soc,
/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
((_peer)->peer_type == CDP_MLD_PEER_TYPE)
/* Get MLD peer from link peer */
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
((link_peer)->mld_peer)
#ifdef WLAN_MLO_MULTI_CHIP
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);
@@ -1533,6 +1536,7 @@ bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
@@ -1714,4 +1718,12 @@ void dp_peer_rx_tids_destroy(struct dp_peer *peer)
peer->rx_tid = NULL;
}
#ifdef REO_SHARED_QREF_TABLE_EN
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
struct dp_peer *peer);
#else
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
struct dp_peer *peer) {}
#endif
#endif /* _DP_PEER_H_ */


@@ -2383,4 +2383,12 @@ end:
return peer;
}
static inline QDF_STATUS
dp_peer_rx_reorder_queue_setup(struct dp_soc *soc, struct dp_peer *peer,
int tid, uint32_t ba_window_size)
{
return soc->arch_ops.dp_peer_rx_reorder_queue_setup(soc,
peer, tid,
ba_window_size);
}
#endif /* _DP_RX_H */


@@ -5915,6 +5915,38 @@ void dp_print_peer_txrx_stats_li(struct dp_peer *peer,
}
}
#ifdef REO_SHARED_QREF_TABLE_EN
static void dp_peer_print_reo_qref_table(struct dp_peer *peer)
{
struct hal_soc *hal;
struct dp_peer *mld_peer;
int i;
uint64_t *reo_qref_addr;
uint32_t peer_idx;
hal = (struct hal_soc *)peer->vdev->pdev->soc->hal_soc;
peer_idx = (peer->peer_id * DP_MAX_TIDS);
reo_qref_addr = &hal->reo_qref.non_mlo_reo_qref_table_vaddr[peer_idx];
mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
if (mld_peer) {
peer = mld_peer;
hal = (struct hal_soc *)
peer->vdev->pdev->soc->hal_soc;
peer_idx = (mld_peer->peer_id - HAL_ML_PEER_ID_START) *
DP_MAX_TIDS;
reo_qref_addr = &hal->reo_qref.mlo_reo_qref_table_vaddr[peer_idx];
}
DP_PRINT_STATS("Reo Qref table for peer_id: %d\n", peer->peer_id);
for (i = 0; i < DP_MAX_TIDS; i++)
DP_PRINT_STATS(" Tid [%d] :%llx", i, reo_qref_addr[i]);
}
#else
static inline void dp_peer_print_reo_qref_table(struct dp_peer *peer)
{
}
#endif
void dp_print_peer_stats(struct dp_peer *peer)
{
uint8_t i;
@@ -6184,6 +6216,8 @@ void dp_print_peer_stats(struct dp_peer *peer)
pdev->soc->arch_ops.txrx_print_peer_stats(peer, PEER_RX_STATS);
dp_peer_print_rx_delay_stats(pdev, peer);
dp_peer_print_reo_qref_table(peer);
}
void dp_print_per_ring_stats(struct dp_soc *soc)


@@ -1760,6 +1760,11 @@ struct dp_arch_ops {
#endif
void (*txrx_print_peer_stats)(struct dp_peer *peer,
enum peer_stats_type stats_type);
/* Dp peer reorder queue setup */
QDF_STATUS (*dp_peer_rx_reorder_queue_setup)(struct dp_soc *soc,
struct dp_peer *peer,
int tid,
uint32_t ba_window_size);
};
/**


@@ -548,6 +548,8 @@ void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
arch_ops->reo_remap_config = dp_reo_remap_config_li;
arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_li;
arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_li;
arch_ops->dp_peer_rx_reorder_queue_setup =
dp_peer_rx_reorder_queue_setup_li;
}
#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH


@@ -1,5 +1,6 @@
/*
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -201,4 +202,31 @@ void dp_rx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
{
}
#endif
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_li(struct dp_soc *soc,
struct dp_peer *peer,
int tid,
uint32_t ba_window_size)
{
struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
if (!rx_tid->hw_qdesc_paddr)
return QDF_STATUS_E_INVAL;
if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
soc->ctrl_psoc,
peer->vdev->pdev->pdev_id,
peer->vdev->vdev_id,
peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
1, ba_window_size)) {
dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
soc, tid);
return QDF_STATUS_E_FAILURE;
}
}
return QDF_STATUS_SUCCESS;
}
#endif


@@ -1796,4 +1796,155 @@ hal_txmon_status_free_buffer_generic_be(qdf_frag_t status_frag)
} while (tlv_status == HAL_MON_TX_STATUS_PPDU_NOT_DONE);
}
#endif /* QCA_MONITOR_2_0_SUPPORT */
#ifdef REO_SHARED_QREF_TABLE_EN
/**
* hal_reo_shared_qaddr_write_be(): Write REO tid queue addr to the
* LUT shared by SW and HW, at the index given by peer id and tid
*
* @hal_soc_hdl: HAL soc handle
* @peer_id: peer id of the peer whose queue ref is written
* @tid: tid queue number
* @hw_qdesc_paddr: REO queue descriptor address
*/
static void hal_reo_shared_qaddr_write_be(hal_soc_handle_t hal_soc_hdl,
uint16_t peer_id,
int tid,
qdf_dma_addr_t hw_qdesc_paddr)
{
struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
struct rx_reo_queue_reference *reo_qref;
uint32_t peer_tid_idx;
/* Plug hw_qdesc_paddr into the Host reo queue reference table */
if (HAL_PEER_ID_IS_MLO(peer_id)) {
peer_tid_idx = ((peer_id - HAL_ML_PEER_ID_START) *
DP_MAX_TIDS) + tid;
reo_qref = (struct rx_reo_queue_reference *)
&hal->reo_qref.mlo_reo_qref_table_vaddr[peer_tid_idx];
} else {
peer_tid_idx = (peer_id * DP_MAX_TIDS) + tid;
reo_qref = (struct rx_reo_queue_reference *)
&hal->reo_qref.non_mlo_reo_qref_table_vaddr[peer_tid_idx];
}
reo_qref->rx_reo_queue_desc_addr_31_0 =
hw_qdesc_paddr & 0xffffffff;
reo_qref->rx_reo_queue_desc_addr_39_32 =
(hw_qdesc_paddr & 0xff00000000) >> 32;
if (hw_qdesc_paddr != 0)
reo_qref->receive_queue_number = tid;
else
reo_qref->receive_queue_number = 0;
hal_verbose_debug("hw_qdesc_paddr: %llx, tid: %d, reo_qref:%pK,"
"rx_reo_queue_desc_addr_31_0: %x,"
"rx_reo_queue_desc_addr_39_32: %x",
hw_qdesc_paddr, tid, reo_qref,
reo_qref->rx_reo_queue_desc_addr_31_0,
reo_qref->rx_reo_queue_desc_addr_39_32);
}
/**
* hal_reo_shared_qaddr_setup_be() - Allocate the MLO and non-MLO reo queue
* reference tables shared between SW and HW; their base addresses are later
* programmed into the Qdesc LUT base0/base1 registers by the init routine.
*
* @hal_soc_hdl: HAL Soc handle
*
* Return: None
*/
static void hal_reo_shared_qaddr_setup_be(hal_soc_handle_t hal_soc_hdl)
{
struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
hal->reo_qref.reo_qref_table_en = 1;
hal->reo_qref.mlo_reo_qref_table_vaddr =
(uint64_t *)qdf_mem_alloc_consistent(
hal->qdf_dev, hal->qdf_dev->dev,
REO_QUEUE_REF_ML_TABLE_SIZE,
&hal->reo_qref.mlo_reo_qref_table_paddr);
hal->reo_qref.non_mlo_reo_qref_table_vaddr =
(uint64_t *)qdf_mem_alloc_consistent(
hal->qdf_dev, hal->qdf_dev->dev,
REO_QUEUE_REF_NON_ML_TABLE_SIZE,
&hal->reo_qref.non_mlo_reo_qref_table_paddr);
hal_verbose_debug("MLO table start paddr:%llx,"
"Non-MLO table start paddr:%llx,"
"MLO table start vaddr: %pK,"
"Non MLO table start vaddr: %pK",
hal->reo_qref.mlo_reo_qref_table_paddr,
hal->reo_qref.non_mlo_reo_qref_table_paddr,
hal->reo_qref.mlo_reo_qref_table_vaddr,
hal->reo_qref.non_mlo_reo_qref_table_vaddr);
}
/**
* hal_reo_shared_qaddr_init_be() - Zero out the REO qref LUT and
* write the start addr of the MLO and non-MLO tables to HW
*
* @hal_soc_hdl: HAL Soc handle
*
* Return: None
*/
static void hal_reo_shared_qaddr_init_be(hal_soc_handle_t hal_soc_hdl)
{
struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
qdf_mem_zero(hal->reo_qref.mlo_reo_qref_table_vaddr,
REO_QUEUE_REF_ML_TABLE_SIZE);
qdf_mem_zero(hal->reo_qref.non_mlo_reo_qref_table_vaddr,
REO_QUEUE_REF_NON_ML_TABLE_SIZE);
/* The LUT_BASE0 and BASE1 registers hold the upper 32 bits of the LUT base
* address and expect the lower 8 bits to be 0. Shift the physical address
* right by 8 to program the upper 32 bits only
*/
HAL_REG_WRITE(hal,
HWIO_REO_R0_QDESC_LUT_BASE0_ADDR_ADDR(REO_REG_REG_BASE),
hal->reo_qref.non_mlo_reo_qref_table_paddr >> 8);
HAL_REG_WRITE(hal,
HWIO_REO_R0_QDESC_LUT_BASE1_ADDR_ADDR(REO_REG_REG_BASE),
hal->reo_qref.mlo_reo_qref_table_paddr >> 8);
HAL_REG_WRITE(hal,
HWIO_REO_R0_QDESC_ADDR_READ_ADDR(REO_REG_REG_BASE),
HAL_SM(HWIO_REO_R0_QDESC_ADDR_READ, LUT_FEATURE_ENABLE,
1));
HAL_REG_WRITE(hal,
HWIO_REO_R0_QDESC_MAX_SW_PEER_ID_ADDR(REO_REG_REG_BASE),
HAL_MS(HWIO_REO_R0_QDESC, MAX_SW_PEER_ID_MAX_SUPPORTED,
0x1fff));
}
/**
* hal_reo_shared_qaddr_detach_be() - Clear the LUT base addr registers in HW
* and free the MLO and non-MLO reo queue reference tables shared between
* SW and HW
*
* @hal_soc_hdl: HAL Soc handle
*
* Return: None
*/
static void hal_reo_shared_qaddr_detach_be(hal_soc_handle_t hal_soc_hdl)
{
struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
HAL_REG_WRITE(hal,
HWIO_REO_R0_QDESC_LUT_BASE0_ADDR_ADDR(REO_REG_REG_BASE),
0);
HAL_REG_WRITE(hal,
HWIO_REO_R0_QDESC_LUT_BASE1_ADDR_ADDR(REO_REG_REG_BASE),
0);
qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
REO_QUEUE_REF_ML_TABLE_SIZE,
hal->reo_qref.mlo_reo_qref_table_vaddr,
hal->reo_qref.mlo_reo_qref_table_paddr, 0);
qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
REO_QUEUE_REF_NON_ML_TABLE_SIZE,
hal->reo_qref.non_mlo_reo_qref_table_vaddr,
hal->reo_qref.non_mlo_reo_qref_table_paddr, 0);
}
#endif
#endif /* _HAL_BE_GENERIC_API_H_ */


@@ -43,7 +43,9 @@
#include <reo_descriptor_threshold_reached_status.h>
#include <reo_flush_queue.h>
#ifdef REO_SHARED_QREF_TABLE_EN
#include "rx_reo_queue_reference.h"
#endif
#define HAL_DESC_64_SET_FIELD(_desc, _word, _fld, _value) do { \
((uint64_t *)(_desc))[(_word ## _ ## _fld ## _OFFSET) >> 3] &= \
~(_word ## _ ## _fld ## _MASK); \


@@ -1028,6 +1028,13 @@ struct hal_hw_txrx_ops {
#ifdef QCA_MONITOR_2_0_SUPPORT
void (*hal_txmon_status_free_buffer)(qdf_frag_t status_frag);
#endif /* QCA_MONITOR_2_0_SUPPORT */
void (*hal_reo_shared_qaddr_setup)(hal_soc_handle_t hal_soc_hdl);
void (*hal_reo_shared_qaddr_init)(hal_soc_handle_t hal_soc_hdl);
void (*hal_reo_shared_qaddr_detach)(hal_soc_handle_t hal_soc_hdl);
void (*hal_reo_shared_qaddr_write)(hal_soc_handle_t hal_soc_hdl,
uint16_t peer_id,
int tid,
qdf_dma_addr_t hw_qdesc_paddr);
};
/**
@@ -1084,6 +1091,22 @@ struct hal_reg_write_fail_history {
};
#endif
/**
* struct reo_queue_ref_table - Reo qref LUT addr
* @mlo_reo_qref_table_vaddr: MLO table vaddr
* @non_mlo_reo_qref_table_vaddr: Non MLO table vaddr
* @mlo_reo_qref_table_paddr: MLO table paddr
* @non_mlo_reo_qref_table_paddr: Non MLO table paddr
* @reo_qref_table_en: Enable flag
*/
struct reo_queue_ref_table {
uint64_t *mlo_reo_qref_table_vaddr;
uint64_t *non_mlo_reo_qref_table_vaddr;
qdf_dma_addr_t mlo_reo_qref_table_paddr;
qdf_dma_addr_t non_mlo_reo_qref_table_paddr;
uint8_t reo_qref_table_en;
};
/**
* struct hal_soc - HAL context to be used to access SRNG APIs
* (currently used by data path and
@@ -1165,6 +1188,8 @@ struct hal_soc {
#endif
/* flag to indicate cmn dmac rings in berryllium */
bool dmac_cmn_src_rxbuf_ring;
/* Reo queue ref table items */
struct reo_queue_ref_table reo_qref;
};
#if defined(FEATURE_HAL_DELAYED_REG_WRITE)
@@ -1227,4 +1252,14 @@ struct hal_srng *hal_ring_handle_to_hal_srng(hal_ring_handle_t hal_ring)
{
return (struct hal_srng *)hal_ring;
}
/* Size of REO queue reference table in Host
* 2k peers * 17 tids * 8bytes(rx_reo_queue_reference)
* = 278528 bytes
*/
#define REO_QUEUE_REF_NON_ML_TABLE_SIZE 278528
/* Calculated based on 512 MLO peers */
#define REO_QUEUE_REF_ML_TABLE_SIZE 69632
#define HAL_ML_PEER_ID_START 0x2000
#define HAL_PEER_ID_IS_MLO(peer_id) ((peer_id) & HAL_ML_PEER_ID_START)
#endif /* _HAL_INTERNAL_H_ */


@@ -1,5 +1,6 @@
/*
* Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -716,4 +717,50 @@ hal_get_tlv_hdr_size(hal_soc_handle_t hal_soc_hdl)
*/
void hal_reo_init_cmd_ring(hal_soc_handle_t hal_soc_hdl,
hal_ring_handle_t hal_ring_hdl);
#ifdef REO_SHARED_QREF_TABLE_EN
/**
* hal_reo_shared_qaddr_setup(): Setup reo qref LUT
* @hal_soc: Hal soc pointer
*
* Allocate MLO and Non MLO table for storing REO queue
* reference pointers
*
* Return: void
*/
static inline void
hal_reo_shared_qaddr_setup(hal_soc_handle_t hal_soc_hdl)
{
struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
if (hal_soc->ops->hal_reo_shared_qaddr_setup)
return hal_soc->ops->hal_reo_shared_qaddr_setup(hal_soc_hdl);
}
/**
* hal_reo_shared_qaddr_detach(): Detach reo qref LUT
* @hal_soc_hdl: HAL soc handle
*
* Clear the MLO and non-MLO table start addresses in the HW registers
* and free the tables
*
* Return: void
*/
static inline void
hal_reo_shared_qaddr_detach(hal_soc_handle_t hal_soc_hdl)
{
struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
if (hal_soc->ops->hal_reo_shared_qaddr_detach)
return hal_soc->ops->hal_reo_shared_qaddr_detach(hal_soc_hdl);
}
#else
static inline void
hal_reo_shared_qaddr_setup(hal_soc_handle_t hal_soc_hdl)
{
}
static inline void
hal_reo_shared_qaddr_detach(hal_soc_handle_t hal_soc_hdl) {}
#endif /* REO_SHARED_QREF_TABLE_EN */
#endif /* _HAL_REO_H */


@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -2871,4 +2871,64 @@ hal_rx_mpdu_info_ampdu_flag_get(hal_soc_handle_t hal_soc_hdl, uint8_t *buf)
return hal_soc->ops->hal_rx_mpdu_info_ampdu_flag_get(buf);
}
#ifdef REO_SHARED_QREF_TABLE_EN
/**
* hal_reo_shared_qaddr_write(): Write REO tid queue addr to the
* LUT shared by SW and HW, at the index given by peer id and tid
*
* @hal_soc_hdl: HAL soc handle
* @peer_id: peer id of the peer whose queue ref is written
* @tid: tid queue number
* @hw_qdesc_paddr: REO queue descriptor address
*/
static inline void
hal_reo_shared_qaddr_write(hal_soc_handle_t hal_soc_hdl,
uint16_t peer_id,
int tid,
qdf_dma_addr_t hw_qdesc_paddr)
{
struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
if (hal_soc->ops->hal_reo_shared_qaddr_write)
return hal_soc->ops->hal_reo_shared_qaddr_write(hal_soc_hdl,
peer_id, tid, hw_qdesc_paddr);
}
/**
* hal_reo_shared_qaddr_init(): Initialize reo qref LUT
* @hal_soc: Hal soc pointer
*
* Write MLO and Non MLO table start addr to HW reg
*
* Return: void
*/
static inline void
hal_reo_shared_qaddr_init(hal_soc_handle_t hal_soc_hdl)
{
struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
if (hal_soc->ops->hal_reo_shared_qaddr_init)
return hal_soc->ops->hal_reo_shared_qaddr_init(hal_soc_hdl);
}
#else
static inline void
hal_reo_shared_qaddr_write(hal_soc_handle_t hal_soc_hdl,
uint16_t peer_id,
int tid,
qdf_dma_addr_t hw_qdesc_paddr) {}
static inline void
hal_reo_shared_qaddr_init(hal_soc_handle_t hal_soc_hdl) {}
#endif /* REO_SHARED_QREF_TABLE_EN */
static inline uint8_t
hal_reo_shared_qaddr_is_enable(hal_soc_handle_t hal_soc_hdl)
{
struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
return hal->reo_qref.reo_qref_table_en;
}
#endif /* _HAL_RX_H */


@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -1087,6 +1087,8 @@ void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev)
qdf_atomic_init(&hal->active_work_cnt);
hal_delayed_reg_write_init(hal);
hal_reo_shared_qaddr_setup((hal_soc_handle_t)hal);
return (void *)hal;
fail3:
qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
@@ -1151,6 +1153,9 @@ extern void hal_detach(void *hal_soc)
sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0);
qdf_minidump_remove(hal, sizeof(*hal), "hal_soc");
hal_reo_shared_qaddr_detach((hal_soc_handle_t)hal);
qdf_mem_free(hal);
return;


@@ -859,6 +859,7 @@ static inline void hal_rx_dump_mpdu_start_tlv_9224(void *mpdustart,
#endif
QDF_TRACE(dbg_level, QDF_MODULE_ID_HAL,
"rx_mpdu_start tlv (1/5) - "
"rx_reo_queue_desc_addr_31_0 :%x"
"rx_reo_queue_desc_addr_39_32 :%x"
"receive_queue_number:%x "
"pre_delim_err_warning:%x "
@@ -877,6 +878,7 @@ static inline void hal_rx_dump_mpdu_start_tlv_9224(void *mpdustart,
"bssid_number:%x "
"tid:%x "
"reserved_7a:%x ",
mpdu_info->rx_reo_queue_desc_addr_31_0,
mpdu_info->rx_reo_queue_desc_addr_39_32,
mpdu_info->receive_queue_number,
mpdu_info->pre_delim_err_warning,
@@ -1625,6 +1627,8 @@ static void hal_reo_setup_9224(struct hal_soc *soc, void *reoparams)
* GLOBAL_LINK_DESC_COUNT_THRESH_IX_0[1,2]
* GLOBAL_LINK_DESC_COUNT_CTRL
*/
hal_reo_shared_qaddr_init((hal_soc_handle_t)soc);
}
static void hal_hw_txrx_ops_attach_qcn9224(struct hal_soc *hal_soc)
@@ -1830,6 +1834,12 @@ static void hal_hw_txrx_ops_attach_qcn9224(struct hal_soc *hal_soc)
hal_rx_priv_info_get_from_tlv_be;
hal_soc->ops->hal_rx_pkt_hdr_get = hal_rx_pkt_hdr_get_be;
hal_soc->ops->hal_reo_setup = hal_reo_setup_9224;
#ifdef REO_SHARED_QREF_TABLE_EN
hal_soc->ops->hal_reo_shared_qaddr_setup = hal_reo_shared_qaddr_setup_be;
hal_soc->ops->hal_reo_shared_qaddr_init = hal_reo_shared_qaddr_init_be;
hal_soc->ops->hal_reo_shared_qaddr_detach = hal_reo_shared_qaddr_detach_be;
hal_soc->ops->hal_reo_shared_qaddr_write = hal_reo_shared_qaddr_write_be;
#endif
};
struct hal_hw_srng_config hw_srng_table_9224[] = {