qcacmn: Do bookkeeping for frames that don't require REO

Management frames originating from links that don't require
reordering are not passed through the mgmt Rx REO module. This can
delay the delivery of frames already held in the reorder list, because
such frames never update the wait counts of the queued entries. Fix
this by allowing such frames to enter the mgmt Rx REO module as well.

CRs-Fixed: 3224128
Change-Id: I94f7bc902acaac959b6bed475413925c266d5557
Authored by Edayilliam Jayadev on 2022-06-20 14:50:41 +05:30; committed by Madan Koyyalamudi
parent f853241025
commit 9f75a857b7
3 changed files with 46 additions and 30 deletions
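
For orientation, here is a minimal sketch of the control-flow change, using hypothetical stand-in types and functions rather than the real qcacmn API: every management frame now enters the reorder entry point, and the new reo_required flag decides whether the frame may be queued or is used for bookkeeping only.

/* Hypothetical stand-in model (not the actual qcacmn API) of the new flow:
 * all frames enter the REO entry point; reo_required selects the path. */
#include <stdbool.h>
#include <stdio.h>

struct frame_desc {
    bool reo_required;  /* field added by this change */
};

/* Stand-in for wlan_mgmt_rx_reo_algo_entry(): a frame with
 * reo_required == false only updates bookkeeping (wait counts of frames
 * already in the reorder list) and must never be queued itself. */
static int reo_algo_entry(struct frame_desc *desc, bool *is_queued)
{
    *is_queued = desc->reo_required; /* simplified: only REO frames queue */
    return 0;
}

int main(void)
{
    struct frame_desc desc;
    bool is_queued;

    /* Old behavior: non-REO frames skipped the module entirely.
     * New behavior: they enter it with reo_required = false. */
    desc.reo_required = false;
    reo_algo_entry(&desc, &is_queued);
    printf("non-REO frame queued: %d (expected 0)\n", is_queued);

    desc.reo_required = true;
    reo_algo_entry(&desc, &is_queued);
    printf("REO frame queued: %d\n", is_queued);
    return 0;
}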


@@ -2248,7 +2248,8 @@ mgmt_rx_reo_update_list(struct mgmt_rx_reo_list *reo_list,
         new_frame_global_ts = mgmt_rx_reo_get_global_ts(frame_desc->rx_params);

         /* Prepare the list entry before acquiring lock */
-        if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME) {
+        if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
+            frame_desc->reo_required) {
                 status = mgmt_rx_reo_prepare_list_entry(frame_desc, &new_entry);
                 if (QDF_IS_STATUS_ERROR(status)) {
                         mgmt_rx_reo_err("Failed to prepare list entry");
@@ -2301,7 +2302,7 @@ mgmt_rx_reo_update_list(struct mgmt_rx_reo_list *reo_list,
         qdf_assert_always(!list_insertion_pos);

         if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
-            !frame_desc->is_stale) {
+            !frame_desc->is_stale && frame_desc->reo_required) {
                 if (least_greater_entry_found) {
                         status = mgmt_rx_reo_update_wait_count(
                                         &new_entry->wait_count,
@@ -2400,15 +2401,6 @@ exit_free_entry:
         if (!*is_queued)
                 return status;

-        if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME) {
-                if (least_greater_entry_found)
-                        mgmt_rx_reo_debug("Inserting new entry %pK before %pK",
-                                          new_entry, least_greater_entry);
-                else
-                        mgmt_rx_reo_debug("Inserting new entry %pK at the tail",
-                                          new_entry);
-        }
-
         return status;
 }
@@ -2775,6 +2767,7 @@ mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
         cur_frame_debug_info->list_size_rx = desc->list_size_rx;
         cur_frame_debug_info->list_insertion_pos = desc->list_insertion_pos;
         cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
+        cur_frame_debug_info->reo_required = desc->reo_required;

         ingress_frame_debug_info->next_index++;
         ingress_frame_debug_info->next_index %=
@@ -2864,7 +2857,7 @@ mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
         boarder = ingress_frame_debug_info->boarder;

         mgmt_rx_reo_alert_no_fl("%s", boarder);
-        mgmt_rx_reo_alert_no_fl("|%5s|%5s|%6s|%6s|%9s|%4s|%5s|%10s|%10s|%10s|%5s|%10s|%11s|%11s|%11s|%4s|%3s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
+        mgmt_rx_reo_alert_no_fl("|%5s|%5s|%6s|%6s|%9s|%4s|%5s|%10s|%10s|%10s|%5s|%10s|%11s|%13s|%11s|%4s|%3s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
                                 "Index", "CPU", "D.type", "F.type", "F.subtype",
                                 "Link", "SeqNo", "Global ts",
                                 "Start ts", "End ts", "Dur", "Last ts",
@@ -2887,6 +2880,7 @@ mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
                 char flag_error = ' ';
                 char flag_zero_wait_count_rx = ' ';
                 char flag_immediate_delivery = ' ';
+                char flag_reo_required = ' ';
                 int64_t ts_last_released_frame = -1;
                 uint8_t link;
@@ -2914,9 +2908,13 @@ mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
                 if (info->immediate_delivery)
                         flag_immediate_delivery = 'I';
+                if (!info->reo_required)
+                        flag_reo_required = 'N';

-                snprintf(flags, sizeof(flags), "%c %c %c %c %c %c", flag_error,
+                snprintf(flags, sizeof(flags), "%c %c %c %c %c %c %c", flag_error,
                          flag_stale, flag_parallel_rx, flag_queued,
-                         flag_zero_wait_count_rx, flag_immediate_delivery);
+                         flag_zero_wait_count_rx, flag_immediate_delivery,
+                         flag_reo_required);
                 snprintf(wait_count, sizeof(wait_count),
                          "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
                          info->wait_count.total_count,
@@ -2967,7 +2965,7 @@ mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
                                  fw_forwaded, host);
                 }

-                mgmt_rx_reo_alert_no_fl("|%5u|%5d|%6u|%6x|%9x|%4u|%5u|%10u|%10u|%10u|%5u|%10lld|%11llu|%11s|%11llu|%4d|%3d|%69s|%70s|%70s|%70s|%70s|%70s|%70s|",
+                mgmt_rx_reo_alert_no_fl("|%5u|%5d|%6u|%6x|%9x|%4u|%5u|%10u|%10u|%10u|%5u|%10lld|%11llu|%13s|%11llu|%4d|%3d|%69s|%70s|%70s|%70s|%70s|%70s|%70s|",
                                         entry, info->cpu_id, info->desc_type,
                                         info->frame_type, info->frame_subtype,
                                         info->link_id,
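
The next file adjusts the debug-table sizing macros to match the extra flag column above: seven single-character flags joined by six spaces occupy 13 visible characters (up from 11 with six flags), which is why the flag column width changes from %11s to %13s, FLAG_MAX_SIZE goes from 11 to 13, and the table border grows by the same two characters, 783 to 785. A quick stand-alone check (the flag letters are illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char flags[32]; /* stand-in buffer; the driver sizes it via FLAG_MAX_SIZE */

    /* 7 flag characters + 6 separating spaces = 13 visible characters */
    snprintf(flags, sizeof(flags), "%c %c %c %c %c %c %c",
             'E', 'S', 'P', 'Q', 'Z', 'I', 'N');
    printf("\"%s\" -> %zu chars\n", flags, strlen(flags)); /* prints 13 */
    return 0;
}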


@@ -76,8 +76,8 @@
 #define MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE (94)
 #define MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE (22)
-#define MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE (783)
-#define MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE (11)
+#define MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE (785)
+#define MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE (13)
 #define MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE (69)
 #define MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE (94)
 #define MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE (22)
@@ -462,6 +462,9 @@ struct mgmt_rx_reo_sim_context {
  * @shared_snapshots: snapshots shared b/w host and target
  * @host_snapshot: host snapshot
  * @cpu_id: CPU index
+ * @reo_required: Indicates whether reorder is required for the current frame.
+ * If reorder is not required, current frame will just be used for updating the
+ * wait count of frames already part of the reorder list.
  */
 struct reo_ingress_debug_frame_info {
         uint8_t link_id;
@@ -489,6 +492,7 @@ struct reo_ingress_debug_frame_info {
                         [MAX_MLO_LINKS][MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
         struct mgmt_rx_reo_snapshot_params host_snapshot[MAX_MLO_LINKS];
         int cpu_id;
+        bool reo_required;
 };

 /**
@@ -694,6 +698,9 @@ struct mgmt_rx_reo_context {
  * @is_parallel_rx: Indicates that this frame is received in parallel to the
  * last frame which is delivered to the upper layer.
  * @pkt_ctr_delta: Packet counter delta of the current and last frame
+ * @reo_required: Indicates whether reorder is required for the current frame.
+ * If reorder is not required, current frame will just be used for updating the
+ * wait count of frames already part of the reorder list.
  */
 struct mgmt_rx_reo_frame_descriptor {
         enum mgmt_rx_reo_frame_descriptor_type type;
@@ -713,6 +720,7 @@ struct mgmt_rx_reo_frame_descriptor {
         struct mgmt_rx_reo_snapshot_params host_snapshot[MAX_MLO_LINKS];
         bool is_parallel_rx;
         int pkt_ctr_delta;
+        bool reo_required;
 };

 /**
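
The @reo_required documentation above says a frame that needs no reordering "will just be used for updating the wait count of frames already part of the reorder list." A hypothetical sketch of that bookkeeping, with invented types that only model the idea:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LINKS 4

struct reo_entry {
    uint32_t wait_count[MAX_LINKS]; /* frames still awaited per link */
};

/* A newer frame arriving on 'link' means a queued entry no longer needs
 * to wait on that link, even if the arriving frame itself is not queued. */
static void update_wait_count(struct reo_entry *e, int link)
{
    if (e->wait_count[link])
        e->wait_count[link]--;
}

static bool ready_to_release(const struct reo_entry *e)
{
    for (int i = 0; i < MAX_LINKS; i++)
        if (e->wait_count[i])
            return false;
    return true;
}

int main(void)
{
    struct reo_entry e = { .wait_count = { 0, 1, 0, 0 } };

    /* A frame from link 1 that requires no reordering still clears the
     * queued entry's dependency on link 1, unblocking its release. */
    update_wait_count(&e, 1);
    printf("ready to release: %d\n", ready_to_release(&e));
    return 0;
}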


@@ -141,12 +141,7 @@ tgt_mgmt_rx_reo_enter_algo_without_buffer(
         desc.list_insertion_pos = -1;
         desc.frame_type = IEEE80211_FC0_TYPE_MGT;
         desc.frame_subtype = 0xFF;
-
-        /** If REO is not required for this descriptor,
-         * no need to proceed further
-         */
-        if (!is_mgmt_rx_reo_required(pdev, &desc))
-                return QDF_STATUS_SUCCESS;
+        desc.reo_required = is_mgmt_rx_reo_required(pdev, &desc);

         /* Enter the REO algorithm */
         status = wlan_mgmt_rx_reo_algo_entry(pdev, &desc, &is_frm_queued);
@@ -273,18 +268,33 @@ QDF_STATUS tgt_mgmt_rx_reo_frame_handler(
         desc.frame_type = frame_type;
         desc.frame_subtype = frame_subtype;

-        /* If REO is not required for this frame, process it right away */
         if (frame_type != IEEE80211_FC0_TYPE_MGT ||
             !is_mgmt_rx_reo_required(pdev, &desc)) {
+                desc.reo_required = false;
+                status = wlan_mgmt_rx_reo_algo_entry(pdev, &desc, &is_queued);
+                if (QDF_IS_STATUS_ERROR(status)) {
+                        mgmt_rx_reo_err("Failure in executing REO algorithm");
+                        goto cleanup;
+                }
+
+                qdf_assert_always(!is_queued);
+
                 return tgt_mgmt_txrx_process_rx_frame(pdev, buf,
                                                       mgmt_rx_params);
+        } else {
+                desc.reo_required = true;
+                status = wlan_mgmt_rx_reo_algo_entry(pdev, &desc, &is_queued);
+
+                qdf_assert_always(QDF_IS_STATUS_ERROR(status) || is_queued);
+
+                /**
+                 * If frame is queued, we shouldn't free up params and
+                 * buf pointers.
+                 */
+                if (is_queued)
+                        return status;
         }

-        status = wlan_mgmt_rx_reo_algo_entry(pdev, &desc, &is_queued);
-
-        /* If frame is queued, we shouldn't free up params and buf pointers */
-        if (is_queued)
-                return status;
-
 cleanup:
         qdf_nbuf_free(buf);
         free_mgmt_rx_event_params(mgmt_rx_params);