@@ -44,6 +44,7 @@ void ol_tx_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
 				       struct txrx_pdev_cfg_param_t *cfg_param)
 {
 	struct txrx_pdev_cfg_t *cfg_ctx = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	cfg_ctx->tx_flow_start_queue_offset =
 		cfg_param->tx_flow_start_queue_offset;
 	cfg_ctx->tx_flow_stop_queue_th =
@@ -169,6 +170,7 @@ struct cdp_cfg *ol_pdev_cfg_attach(qdf_device_t osdev, void *pcfg_param)
 int ol_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->is_high_latency;
 }
 
@@ -185,18 +187,21 @@ int ol_cfg_max_peer_id(struct cdp_cfg *cfg_pdev)
 int ol_cfg_max_vdevs(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->max_vdev;
 }
 
 int ol_cfg_rx_pn_check(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->rx_pn_check;
 }
 
 int ol_cfg_rx_fwd_check(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->pn_rx_fwd_check;
 }
 
@@ -204,18 +209,21 @@ void ol_set_cfg_rx_fwd_disabled(struct cdp_cfg *cfg_pdev,
 				uint8_t disable_rx_fwd)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	cfg->rx_fwd_disabled = disable_rx_fwd;
 }
 
 void ol_set_cfg_packet_log_enabled(struct cdp_cfg *cfg_pdev, uint8_t val)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	cfg->is_packet_log_enabled = val;
 }
 
 uint8_t ol_cfg_is_packet_log_enabled(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->is_packet_log_enabled;
 }
 
@@ -226,6 +234,7 @@ int ol_cfg_rx_fwd_disabled(struct cdp_cfg *cfg_pdev)
 	return 1;
 #else
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->rx_fwd_disabled;
 #endif
 }
@@ -233,36 +242,42 @@ int ol_cfg_rx_fwd_disabled(struct cdp_cfg *cfg_pdev)
 int ol_cfg_rx_fwd_inter_bss(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->rx_fwd_inter_bss;
 }
 
 enum wlan_frm_fmt ol_cfg_frame_type(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->frame_type;
 }
 
 int ol_cfg_max_thruput_mbps(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->max_thruput_mbps;
 }
 
 int ol_cfg_netbuf_frags_max(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->max_nbuf_frags;
 }
 
 int ol_cfg_tx_free_at_download(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->tx_free_at_download;
 }
 
 void ol_cfg_set_tx_free_at_download(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	cfg->tx_free_at_download = 1;
 }
 
@@ -271,6 +286,7 @@ void ol_cfg_set_tx_free_at_download(struct cdp_cfg *cfg_pdev)
 uint16_t ol_cfg_target_tx_credit(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->target_tx_credit;
 }
 #else
@@ -291,30 +307,35 @@ uint16_t ol_cfg_target_tx_credit(struct cdp_cfg *cfg_pdev)
 int ol_cfg_tx_download_size(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->tx_download_size;
 }
 
 int ol_cfg_rx_host_defrag_timeout_duplicate_check(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->defrag_timeout_check;
 }
 
 int ol_cfg_throttle_period_ms(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->throttle_period_ms;
 }
 
 int ol_cfg_throttle_duty_cycle_level(struct cdp_cfg *cfg_pdev, int level)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->dutycycle_level[level];
 }
 
 int ol_cfg_is_full_reorder_offload(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->is_full_reorder_offload;
 }
 
@@ -343,6 +364,7 @@ u_int8_t ol_cfg_is_ptp_rx_opt_enabled(struct cdp_cfg *cfg_pdev)
 int ol_cfg_is_rx_thread_enabled(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->enable_rxthread;
 }
 
@@ -356,6 +378,7 @@ int ol_cfg_is_rx_thread_enabled(struct cdp_cfg *cfg_pdev)
 int ol_cfg_get_tx_flow_stop_queue_th(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->tx_flow_stop_queue_th;
 }
 
@@ -368,6 +391,7 @@ int ol_cfg_get_tx_flow_stop_queue_th(struct cdp_cfg *cfg_pdev)
 int ol_cfg_get_tx_flow_start_queue_offset(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->tx_flow_start_queue_offset;
 }
 
@@ -377,36 +401,42 @@ int ol_cfg_get_tx_flow_start_queue_offset(struct cdp_cfg *cfg_pdev)
 unsigned int ol_cfg_ipa_uc_offload_enabled(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return (unsigned int)cfg->ipa_uc_rsc.uc_offload_enabled;
 }
 
 unsigned int ol_cfg_ipa_uc_tx_buf_size(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->ipa_uc_rsc.tx_buf_size;
 }
 
 unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->ipa_uc_rsc.tx_max_buf_cnt;
 }
 
 unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->ipa_uc_rsc.rx_ind_ring_size;
 }
 
 unsigned int ol_cfg_ipa_uc_tx_partition_base(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->ipa_uc_rsc.tx_partition_base;
 }
 
 void ol_cfg_set_ipa_uc_tx_partition_base(struct cdp_cfg *cfg_pdev, uint32_t val)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	cfg->ipa_uc_rsc.tx_partition_base = val;
 }
 #endif /* IPA_OFFLOAD */
@@ -421,6 +451,7 @@ void ol_cfg_set_ipa_uc_tx_partition_base(struct cdp_cfg *cfg_pdev, uint32_t val)
 bool ol_cfg_is_ce_classify_enabled(struct cdp_cfg *cfg_pdev)
 {
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
+
 	return cfg->ce_classify_enabled;
 }
 
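
Note: every hunk above applies the same checkpatch-style rule: a local
variable declaration block must be followed by one blank line before the
first statement. A minimal sketch of the resulting pattern, using a
hypothetical accessor and field that are not part of this patch:

	/* Illustrative only: ol_cfg_example/example_field do not exist in ol_cfg.c. */
	int ol_cfg_example(struct cdp_cfg *cfg_pdev)
	{
		struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

		/* blank line above separates declarations from statements */
		return cfg->example_field;
	}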