qcacmn: Add changes to Affine away taken CPUs

Add changes to affine away IRQs from the CPUs taken
by audio driver during Pro audio use case.

Change-Id: I881c42e9f951fbf965be6d6a71994fd97791ee48
CRs-Fixed: 3502637
This commit is contained in:
Amit Mehta
2023-05-15 06:29:34 -07:00
committed by Rahul Choudhary
parent 40d067432f
commit 8a4a8722b0
12 changed files with 592 additions and 54 deletions

View File

@@ -120,9 +120,32 @@
0, \ 0, \
"Disable wake IRQ") "Disable wake IRQ")
/*
* <ini>
* irq_affine_audio_use_case - IRQ affinity for audio use case supported
* @Min: 0
* @Max: 1
* @Default: 0
*
* This ini controls driver to enable IRQ affinity for Pro audio use case.
*
* Related: None.
*
* Supported Feature: IRQ Affinity
*
* Usage: External
*
* </ini>
*/
#define CFG_IRQ_AFFINE_AUDIO_USE_CASE CFG_INI_BOOL( \
"irq_affine_audio_use_case", \
0, \
"Enable IRQ affinity for audio use case")
#define CFG_HIF \ #define CFG_HIF \
CFG_RING_TIMER_THRESHOLD \ CFG_RING_TIMER_THRESHOLD \
CFG_BATCH_COUNT_THRESHOLD \ CFG_BATCH_COUNT_THRESHOLD \
CFG(CFG_DISABLE_WAKE_IRQ) CFG(CFG_DISABLE_WAKE_IRQ) \
CFG(CFG_IRQ_AFFINE_AUDIO_USE_CASE)
#endif /* _CFG_HIF_H_ */ #endif /* _CFG_HIF_H_ */

View File

@@ -626,6 +626,29 @@ struct hif_event_misc {
uint64_t last_irq_ts; uint64_t last_irq_ts;
}; };
#ifdef WLAN_FEATURE_AFFINITY_MGR
/**
* struct hif_cpu_affinity - CPU affinity mask info for IRQ
*
* @current_irq_mask: Current CPU mask set for IRQ
* @wlan_requested_mask: CPU mask requested by WLAN
* @walt_taken_mask: Current CPU taken by Audio
* @last_updated: Last time IRQ CPU affinity was updated
* @last_affined_away: Last time when IRQ was affined away
* @update_requested: IRQ affinity hint set requested by WLAN
* @irq: IRQ number
*/
struct hif_cpu_affinity {
qdf_cpu_mask current_irq_mask;
qdf_cpu_mask wlan_requested_mask;
qdf_cpu_mask walt_taken_mask;
uint64_t last_updated;
uint64_t last_affined_away;
bool update_requested;
int irq;
};
#endif
/** /**
* struct hif_event_history - history for one interrupt group * struct hif_event_history - history for one interrupt group
* @index: index to store new event * @index: index to store new event
@@ -2761,4 +2784,118 @@ hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
return QDF_STATUS_SUCCESS; return QDF_STATUS_SUCCESS;
} }
#endif #endif
/**
 * hif_irq_set_affinity_hint() - Set CPU affinity hint for an IRQ
 * @irq_num: IRQ number
 * @cpu_mask: CPU mask to apply to the IRQ
 *
 * Drops IRQ_NO_BALANCING while updating the affinity and restores it
 * afterwards — presumably because the affinity update is rejected while
 * the no-balancing flag is set (TODO confirm against qdf/genirq behavior).
 *
 * Return: QDF_STATUS from the affinity update
 */
static inline QDF_STATUS
hif_irq_set_affinity_hint(int irq_num, qdf_cpu_mask *cpu_mask)
{
	QDF_STATUS status;

	qdf_dev_modify_irq_status(irq_num, IRQ_NO_BALANCING, 0);
	status = qdf_dev_set_irq_affinity(irq_num,
					  (struct qdf_cpu_mask *)cpu_mask);
	qdf_dev_modify_irq_status(irq_num, 0, IRQ_NO_BALANCING);

	return status;
}
#ifdef WLAN_FEATURE_AFFINITY_MGR
/**
* hif_affinity_mgr_init_ce_irq() - Init for CE IRQ
* @scn: hif opaque handle
* @id: CE ID
* @irq: IRQ assigned
*
* Return: None
*/
void
hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq);
/**
* hif_affinity_mgr_init_grp_irq() - Init for group IRQ
* @scn: hif opaque handle
* @grp_id: GRP ID
* @irq_num: IRQ number of hif ext group
* @irq: IRQ number assigned
*
* Return: None
*/
void
hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id,
int irq_num, int irq);
/**
* hif_affinity_mgr_set_qrg_irq_affinity() - Set affinity for group IRQ
* @scn: hif opaque handle
* @irq: IRQ assigned
* @grp_id: GRP ID
* @irq_index: IRQ number of hif ext group
 * @cpu_mask: requested cpu_mask for IRQ
*
* Return: status
*/
QDF_STATUS
hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
uint32_t grp_id, uint32_t irq_index,
qdf_cpu_mask *cpu_mask);
/**
* hif_affinity_mgr_set_ce_irq_affinity() - Set affinity for CE IRQ
* @scn: hif opaque handle
* @irq: IRQ assigned
* @ce_id: CE ID
 * @cpu_mask: requested cpu_mask for IRQ
*
* Return: status
*/
QDF_STATUS
hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
uint32_t ce_id, qdf_cpu_mask *cpu_mask);
/**
* hif_affinity_mgr_affine_irq() - Affine CE and GRP IRQs
* @scn: hif opaque handle
*
* Return: None
*/
void hif_affinity_mgr_affine_irq(struct hif_softc *scn);
#else
static inline void
hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq)
{
}
static inline void
hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id, int irq_num,
int irq)
{
}
static inline QDF_STATUS
hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
uint32_t grp_id, uint32_t irq_index,
qdf_cpu_mask *cpu_mask)
{
return hif_irq_set_affinity_hint(irq, cpu_mask);
}
static inline QDF_STATUS
hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
uint32_t ce_id, qdf_cpu_mask *cpu_mask)
{
return hif_irq_set_affinity_hint(irq, cpu_mask);
}
static inline
void hif_affinity_mgr_affine_irq(struct hif_softc *scn)
{
}
#endif
/**
* hif_affinity_mgr_set_affinity() - Affine CE and GRP IRQs
* @scn: hif opaque handle
*
* Return: None
*/
void hif_affinity_mgr_set_affinity(struct hif_opaque_softc *scn);
#endif /* _HIF_H_ */ #endif /* _HIF_H_ */

View File

@@ -470,3 +470,7 @@ void hif_dummy_set_grp_intr_affinity(struct hif_softc *scn,
{ {
} }
#endif #endif
/* Default no-op used when a bus does not register an affinity manager op */
void hif_dummy_affinity_mgr_set_affinity(struct hif_softc *scn)
{
}

View File

@@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2021,2023 Qualcomm Innovation Center, Inc. All rights reserved.
* *
* Permission to use, copy, modify, and/or distribute this software for * Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the * any purpose with or without fee is hereby granted, provided that the
@@ -100,3 +100,4 @@ void hif_dummy_config_irq_clear_cpu_affinity(struct hif_softc *scn,
void hif_dummy_set_grp_intr_affinity(struct hif_softc *scn, void hif_dummy_set_grp_intr_affinity(struct hif_softc *scn,
uint32_t grp_intr_bitmask, bool perf); uint32_t grp_intr_bitmask, bool perf);
#endif #endif
void hif_dummy_affinity_mgr_set_affinity(struct hif_softc *scn);

View File

@@ -71,6 +71,8 @@ static void hif_initialize_default_ops(struct hif_softc *hif_sc)
#ifdef FEATURE_IRQ_AFFINITY #ifdef FEATURE_IRQ_AFFINITY
bus_ops->hif_set_grp_intr_affinity = &hif_dummy_set_grp_intr_affinity; bus_ops->hif_set_grp_intr_affinity = &hif_dummy_set_grp_intr_affinity;
#endif #endif
bus_ops->hif_affinity_mgr_set_affinity =
&hif_dummy_affinity_mgr_set_affinity;
} }
#define NUM_OPS (sizeof(struct hif_bus_ops) / sizeof(void *)) #define NUM_OPS (sizeof(struct hif_bus_ops) / sizeof(void *))
@@ -717,3 +719,16 @@ void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
perf); perf);
} }
#endif #endif
void hif_affinity_mgr_set_affinity(struct hif_opaque_softc *scn)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(scn);

	/* Dispatch to the bus-specific handler when one is registered */
	if (hif_sc && hif_sc->bus_ops.hif_affinity_mgr_set_affinity)
		hif_sc->bus_ops.hif_affinity_mgr_set_affinity(hif_sc);
}

View File

@@ -99,6 +99,7 @@ struct hif_bus_ops {
void (*hif_set_grp_intr_affinity)(struct hif_softc *scn, void (*hif_set_grp_intr_affinity)(struct hif_softc *scn,
uint32_t grp_intr_bitmask, bool perf); uint32_t grp_intr_bitmask, bool perf);
#endif #endif
void (*hif_affinity_mgr_set_affinity)(struct hif_softc *scn);
}; };
#ifdef HIF_SNOC #ifdef HIF_SNOC

View File

@@ -93,7 +93,9 @@ QDF_STATUS hif_initialize_ipci_ops(struct hif_softc *hif_sc)
#ifdef FEATURE_IRQ_AFFINITY #ifdef FEATURE_IRQ_AFFINITY
bus_ops->hif_set_grp_intr_affinity = &hif_ipci_set_grp_intr_affinity; bus_ops->hif_set_grp_intr_affinity = &hif_ipci_set_grp_intr_affinity;
#endif #endif
#ifdef WLAN_FEATURE_AFFINITY_MGR
bus_ops->hif_affinity_mgr_set_affinity = &hif_affinity_mgr_affine_irq;
#endif
return QDF_STATUS_SUCCESS; return QDF_STATUS_SUCCESS;
} }

View File

@@ -98,7 +98,9 @@ QDF_STATUS hif_initialize_pci_ops(struct hif_softc *hif_sc)
#ifdef FEATURE_IRQ_AFFINITY #ifdef FEATURE_IRQ_AFFINITY
bus_ops->hif_set_grp_intr_affinity = &hif_pci_set_grp_intr_affinity; bus_ops->hif_set_grp_intr_affinity = &hif_pci_set_grp_intr_affinity;
#endif #endif
#ifdef WLAN_FEATURE_AFFINITY_MGR
bus_ops->hif_affinity_mgr_set_affinity = &hif_affinity_mgr_affine_irq;
#endif
return QDF_STATUS_SUCCESS; return QDF_STATUS_SUCCESS;
} }

View File

@@ -1136,6 +1136,38 @@ static inline void hif_latency_detect_init(struct hif_softc *scn)
static inline void hif_latency_detect_deinit(struct hif_softc *scn) static inline void hif_latency_detect_deinit(struct hif_softc *scn)
{} {}
#endif #endif
#ifdef WLAN_FEATURE_AFFINITY_MGR
#define AFFINITY_THRESHOLD 5000000
/**
 * hif_affinity_mgr_init() - Initialize IRQ affinity manager state
 * @scn: hif handle
 * @psoc: psoc handle used to read the INI configuration
 *
 * Enables the feature only when both the INI knob is set and the
 * platform exposes the audio-taken-CPUs query, and records the set of
 * CPUs (LITTLE cluster) that are allowed to be affined away.
 *
 * Return: None
 */
static inline void
hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
{
	unsigned int cpus;
	qdf_cpu_mask allowed_mask;

	scn->affinity_mgr_supported =
		(cfg_get(psoc, CFG_IRQ_AFFINE_AUDIO_USE_CASE) &&
		 qdf_walt_get_cpus_taken_supported());

	hif_info("Affinity Manager supported: %d", scn->affinity_mgr_supported);

	if (!scn->affinity_mgr_supported)
		return;

	/* Hold-off period (usecs) between successive affine-away updates */
	scn->time_threshold = AFFINITY_THRESHOLD;

	/* Clear first: qdf_cpumask_set_cpu() only ORs bits in, so an
	 * uninitialized stack mask would carry over garbage bits into
	 * scn->allowed_mask.
	 */
	qdf_cpumask_clear(&allowed_mask);
	qdf_for_each_possible_cpu(cpus)
		if (qdf_topology_physical_package_id(cpus) ==
		    CPU_CLUSTER_TYPE_LITTLE)
			qdf_cpumask_set_cpu(cpus, &allowed_mask);
	qdf_cpumask_copy(&scn->allowed_mask, &allowed_mask);
}
#else
static inline void
hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
{
}
#endif
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
uint32_t mode, uint32_t mode,
enum qdf_bus_type bus_type, enum qdf_bus_type bus_type,
@@ -1184,6 +1216,7 @@ struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
hif_cpuhp_register(scn); hif_cpuhp_register(scn);
hif_latency_detect_init(scn); hif_latency_detect_init(scn);
hif_affinity_mgr_init(scn, psoc);
out: out:
return GET_HIF_OPAQUE_HDL(scn); return GET_HIF_OPAQUE_HDL(scn);
@@ -2495,3 +2528,333 @@ int hif_system_pm_state_check(struct hif_opaque_softc *hif)
return 0; return 0;
} }
#endif #endif
#ifdef WLAN_FEATURE_AFFINITY_MGR
/*
* hif_audio_cpu_affinity_allowed() - Check if audio cpu affinity allowed
*
* @scn: hif handle
* @cfg: hif affinity manager configuration for IRQ
* @audio_taken_cpu: Current CPUs which are taken by audio.
* @current_time: Current system time.
*
* This API checks for 2 conditions
* 1) Last audio taken mask and current taken mask are different
* 2) Last time when IRQ was affined away due to audio taken CPUs is
* more than time threshold (5 Seconds in current case).
* If both condition satisfies then only return true.
*
* Return: bool: true if it is allowed to affine away audio taken cpus.
*/
static inline bool
hif_audio_cpu_affinity_allowed(struct hif_softc *scn,
			       struct hif_cpu_affinity *cfg,
			       qdf_cpu_mask audio_taken_cpu,
			       uint64_t current_time)
{
	uint64_t usecs_since_affined;

	/* Audio taken CPU set is unchanged since the last update: allowed */
	if (qdf_cpumask_equal(&audio_taken_cpu, &cfg->walt_taken_mask))
		return true;

	/* Mask changed: allow only after the hold-off threshold has elapsed
	 * since the IRQ was last affined away
	 */
	usecs_since_affined =
		qdf_log_timestamp_to_usecs(current_time -
					   cfg->last_affined_away);

	return usecs_since_affined >= scn->time_threshold;
}
/**
 * hif_affinity_mgr_check_update_mask() - Check if cpu mask need to be updated
 *
 * @scn: hif handle
 * @cfg: hif affinity manager configuration for IRQ
 * @audio_taken_cpu: Current CPUs which are taken by audio.
 * @cpu_mask: CPU mask which need to be updated.
 * @current_time: Current system time.
 *
 * This API checks if Pro audio use case is running and if cpu_mask need
 * to be updated
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
hif_affinity_mgr_check_update_mask(struct hif_softc *scn,
				   struct hif_cpu_affinity *cfg,
				   qdf_cpu_mask audio_taken_cpu,
				   qdf_cpu_mask *cpu_mask,
				   uint64_t current_time)
{
	qdf_cpu_mask allowed_mask;

	/*
	 * Case 1: audio_taken_mask is empty
	 * Check if passed cpu_mask and wlan_requested_mask is same or not.
	 * If both mask are different copy wlan_requested_mask(IRQ affinity
	 * mask requested by WLAN) to cpu_mask.
	 *
	 * Case 2: audio_taken_mask is not empty
	 * 1. Only allow update if last time when IRQ was affined away due to
	 *    audio taken CPUs is more than 5 seconds or update is requested
	 *    by WLAN
	 * 2. Only allow silver cores to be affined away.
	 * 3. Check if any allowed CPUs for audio use case is set in cpu_mask.
	 *    i. If any CPU mask is set, mask out that CPU from the cpu_mask
	 *    ii. If after masking out audio taken cpu(Silver cores) cpu_mask
	 *        is empty, set mask to all cpu except cpus taken by audio.
	 * Example:
	 *| Audio mask | mask allowed | cpu_mask | WLAN req mask | new cpu_mask|
	 *|    0x00    |    0x00      |   0x0C   |     0x0C      |    0x0C     |
	 *|    0x00    |    0x00      |   0x03   |     0x03      |    0x03     |
	 *|    0x00    |    0x00      |   0xFC   |     0x03      |    0x03     |
	 *|    0x00    |    0x00      |   0x03   |     0x0C      |    0x0C     |
	 *|    0x0F    |    0x03      |   0x0C   |     0x0C      |    0x0C     |
	 *|    0x0F    |    0x03      |   0x03   |     0x03      |    0xFC     |
	 *|    0x03    |    0x03      |   0x0C   |     0x0C      |    0x0C     |
	 *|    0x03    |    0x03      |   0x03   |     0x03      |    0xFC     |
	 *|    0x03    |    0x03      |   0xFC   |     0x03      |    0xFC     |
	 *|    0xF0    |    0x00      |   0x0C   |     0x0C      |    0x0C     |
	 *|    0xF0    |    0x00      |   0x03   |     0x03      |    0x03     |
	 */

	/* Check if audio taken mask is empty */
	if (qdf_likely(qdf_cpumask_empty(&audio_taken_cpu))) {
		/* If CPU mask requested by WLAN for the IRQ and
		 * cpu_mask passed CPU mask set for IRQ is different
		 * Copy requested mask into cpu_mask and return
		 */
		if (qdf_unlikely(!qdf_cpumask_equal(cpu_mask,
						    &cfg->wlan_requested_mask))) {
			qdf_cpumask_copy(cpu_mask, &cfg->wlan_requested_mask);
			return QDF_STATUS_SUCCESS;
		}
		return QDF_STATUS_E_ALREADY;
	}

	/* Audio owns some CPUs: bail out unless enough time has passed
	 * since the last affine-away, or WLAN explicitly requested an update
	 */
	if (!(hif_audio_cpu_affinity_allowed(scn, cfg, audio_taken_cpu,
					     current_time) ||
	      cfg->update_requested))
		return QDF_STATUS_E_AGAIN;

	/* Only allow Silver cores to be affine away */
	qdf_cpumask_and(&allowed_mask, &scn->allowed_mask, &audio_taken_cpu);
	if (qdf_cpumask_intersects(cpu_mask, &allowed_mask)) {
		/* If any of taken CPU(Silver cores) mask is set in cpu_mask,
		 * mask out the audio taken CPUs from the cpu_mask.
		 */
		qdf_cpumask_andnot(cpu_mask, &cfg->wlan_requested_mask,
				   &allowed_mask);
		/* If cpu_mask is empty set it to all CPUs
		 * except taken by audio(Silver cores)
		 */
		if (qdf_unlikely(qdf_cpumask_empty(cpu_mask)))
			qdf_cpumask_complement(cpu_mask, &allowed_mask);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_ALREADY;
}
/**
 * hif_check_and_affine_irq() - Check and update affinity for one IRQ
 * @scn: hif handle
 * @cfg: affinity manager configuration for this IRQ
 * @audio_taken_cpu: CPUs currently taken by audio
 * @cpu_mask: current CPU mask of the IRQ (local copy, candidate for update)
 * @current_time: current timestamp (log timestamp units)
 *
 * Runs the audio-aware mask check; if the mask was changed, applies it
 * to the IRQ and records the new state in @cfg.
 *
 * Return: QDF_STATUS_SUCCESS if the IRQ affinity was updated, otherwise
 *         the failing status from the mask check or the affinity call
 */
static inline QDF_STATUS
hif_check_and_affine_irq(struct hif_softc *scn, struct hif_cpu_affinity *cfg,
			 qdf_cpu_mask audio_taken_cpu, qdf_cpu_mask cpu_mask,
			 uint64_t current_time)
{
	QDF_STATUS status;

	status = hif_affinity_mgr_check_update_mask(scn, cfg,
						    audio_taken_cpu,
						    &cpu_mask,
						    current_time);
	/* Set IRQ affinity if CPU mask was updated */
	if (QDF_IS_STATUS_SUCCESS(status)) {
		status = hif_irq_set_affinity_hint(cfg->irq,
						   &cpu_mask);
		if (QDF_IS_STATUS_SUCCESS(status)) {
			/* Store audio taken CPU mask */
			qdf_cpumask_copy(&cfg->walt_taken_mask,
					 &audio_taken_cpu);
			/* Store CPU mask which was set for IRQ */
			qdf_cpumask_copy(&cfg->current_irq_mask,
					 &cpu_mask);
			/* Set time when IRQ affinity was updated */
			cfg->last_updated = current_time;
			if (hif_audio_cpu_affinity_allowed(scn, cfg,
							   audio_taken_cpu,
							   current_time))
				/* If CPU mask was updated due to CPU
				 * taken by audio, update
				 * last_affined_away time
				 */
				cfg->last_affined_away = current_time;
		}
	}

	return status;
}
/**
 * hif_affinity_mgr_affine_irq() - Affine CE and group IRQs away from
 * CPUs taken by audio
 * @scn: hif handle
 *
 * Walks all CE IRQs and all ext-group IRQs and updates each IRQ's CPU
 * affinity based on the CPUs currently taken by the audio driver.
 *
 * Return: None
 */
void hif_affinity_mgr_affine_irq(struct hif_softc *scn)
{
	bool audio_affinity_allowed = false;
	int i, j, ce_id;
	uint64_t current_time;
	/* NOTE(review): assumes the printed CPU-list string fits in 10
	 * chars — confirm against qdf_thread_cpumap_print_to_pagebuf()
	 */
	char cpu_str[10];
	QDF_STATUS status;
	qdf_cpu_mask cpu_mask, audio_taken_cpu;
	struct HIF_CE_state *hif_state;
	struct hif_exec_context *hif_ext_group;
	struct CE_attr *host_ce_conf;
	struct HIF_CE_state *ce_sc;
	struct hif_cpu_affinity *cfg;

	if (!scn->affinity_mgr_supported)
		return;

	current_time = hif_get_log_timestamp();
	/* Get CPU mask for audio taken CPUs */
	audio_taken_cpu = qdf_walt_get_cpus_taken();

	/* Pass 1: CE IRQs; skip CEs whose interrupts are disabled */
	ce_sc = HIF_GET_CE_STATE(scn);
	host_ce_conf = ce_sc->host_ce_config;
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
			continue;
		cfg = &scn->ce_irq_cpu_mask[ce_id];
		qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
		status =
			hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
						 cpu_mask, current_time);
		if (QDF_IS_STATUS_SUCCESS(status))
			audio_affinity_allowed = true;
	}

	/* Pass 2: ext group IRQs */
	hif_state = HIF_GET_CE_STATE(scn);
	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		for (j = 0; j < hif_ext_group->numirq; j++) {
			cfg = &scn->irq_cpu_mask[hif_ext_group->grp_id][j];
			qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
			status =
				hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
							 cpu_mask, current_time);
			if (QDF_IS_STATUS_SUCCESS(status)) {
				/* presumably forces NAPI to complete so
				 * polling re-schedules off the excluded
				 * CPU — TODO confirm
				 */
				qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
				audio_affinity_allowed = true;
			}
		}
	}

	/* Log the audio-taken mask once per invocation if anything changed */
	if (audio_affinity_allowed) {
		qdf_thread_cpumap_print_to_pagebuf(false, cpu_str,
						   &audio_taken_cpu);
		hif_info("Audio taken CPU mask: %s", cpu_str);
	}
}
/**
 * hif_affinity_mgr_set_irq_affinity() - Apply a WLAN-requested CPU mask
 * @scn: hif handle
 * @irq: IRQ number
 * @cfg: affinity manager configuration for this IRQ
 * @cpu_mask: CPU mask requested by WLAN; may be adjusted in place when
 *            audio has taken CPUs
 *
 * Records @cpu_mask as the WLAN requested mask, lets the audio-aware
 * checker adjust it, then applies the (possibly adjusted) mask to @irq
 * and updates the bookkeeping in @cfg.
 *
 * Return: QDF_STATUS from the affinity hint update
 */
static inline QDF_STATUS
hif_affinity_mgr_set_irq_affinity(struct hif_softc *scn, uint32_t irq,
				  struct hif_cpu_affinity *cfg,
				  qdf_cpu_mask *cpu_mask)
{
	uint64_t current_time;
	/* NOTE(review): assumes the printed CPU-list string fits in 10
	 * chars — confirm against qdf_thread_cpumap_print_to_pagebuf()
	 */
	char cpu_str[10];
	QDF_STATUS status, mask_updated;
	qdf_cpu_mask audio_taken_cpu = qdf_walt_get_cpus_taken();

	current_time = hif_get_log_timestamp();
	qdf_cpumask_copy(&cfg->wlan_requested_mask, cpu_mask);
	/* Mark explicit-request so the checker bypasses the hold-off */
	cfg->update_requested = true;
	mask_updated = hif_affinity_mgr_check_update_mask(scn, cfg,
							  audio_taken_cpu,
							  cpu_mask,
							  current_time);

	status = hif_irq_set_affinity_hint(irq, cpu_mask);
	if (QDF_IS_STATUS_SUCCESS(status)) {
		qdf_cpumask_copy(&cfg->walt_taken_mask, &audio_taken_cpu);
		qdf_cpumask_copy(&cfg->current_irq_mask, cpu_mask);
		if (QDF_IS_STATUS_SUCCESS(mask_updated)) {
			cfg->last_updated = current_time;
			if (hif_audio_cpu_affinity_allowed(scn, cfg,
							   audio_taken_cpu,
							   current_time)) {
				cfg->last_affined_away = current_time;
				qdf_thread_cpumap_print_to_pagebuf(false,
								   cpu_str,
								   &audio_taken_cpu);
				hif_info_rl("Audio taken CPU mask: %s",
					    cpu_str);
			}
		}
	}
	cfg->update_requested = false;

	return status;
}
QDF_STATUS
hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
				      uint32_t grp_id, uint32_t irq_index,
				      qdf_cpu_mask *cpu_mask)
{
	/* Without the affinity manager, apply the requested mask directly */
	if (!scn->affinity_mgr_supported)
		return hif_irq_set_affinity_hint(irq, cpu_mask);

	return hif_affinity_mgr_set_irq_affinity(
			scn, irq, &scn->irq_cpu_mask[grp_id][irq_index],
			cpu_mask);
}
QDF_STATUS
hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
				     uint32_t ce_id, qdf_cpu_mask *cpu_mask)
{
	/* Without the affinity manager, apply the requested mask directly */
	if (!scn->affinity_mgr_supported)
		return hif_irq_set_affinity_hint(irq, cpu_mask);

	return hif_affinity_mgr_set_irq_affinity(
			scn, irq, &scn->ce_irq_cpu_mask[ce_id], cpu_mask);
}
/**
 * hif_affinity_mgr_init_ce_irq() - Init affinity manager state for a CE IRQ
 * @scn: hif handle
 * @id: CE ID
 * @irq: IRQ number assigned to the CE
 *
 * Return: None
 */
void
hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq)
{
	unsigned int cpus;
	qdf_cpu_mask cpu_mask;
	struct hif_cpu_affinity *cfg = NULL;

	if (!scn->affinity_mgr_supported)
		return;

	/* Set CPU Mask to all possible CPUs.
	 * Clear first: cpu_mask is an uninitialized stack variable and
	 * qdf_cpumask_set_cpu() only ORs bits in, so stale bits outside
	 * the possible-CPU range would otherwise survive.
	 */
	qdf_cpumask_clear(&cpu_mask);
	qdf_for_each_possible_cpu(cpus)
		qdf_cpumask_set_cpu(cpus, &cpu_mask);

	cfg = &scn->ce_irq_cpu_mask[id];
	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
	cfg->irq = irq;
	cfg->last_updated = 0;
	cfg->last_affined_away = 0;
	cfg->update_requested = false;
}
/**
 * hif_affinity_mgr_init_grp_irq() - Init affinity manager state for a
 * group IRQ
 * @scn: hif handle
 * @grp_id: group ID
 * @irq_num: IRQ index within the hif ext group
 * @irq: IRQ number assigned
 *
 * Return: None
 */
void
hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id,
			      int irq_num, int irq)
{
	unsigned int cpus;
	qdf_cpu_mask cpu_mask;
	struct hif_cpu_affinity *cfg = NULL;

	if (!scn->affinity_mgr_supported)
		return;

	/* Set CPU Mask to all possible CPUs.
	 * Clear first: cpu_mask is an uninitialized stack variable and
	 * qdf_cpumask_set_cpu() only ORs bits in, so stale bits outside
	 * the possible-CPU range would otherwise survive.
	 */
	qdf_cpumask_clear(&cpu_mask);
	qdf_for_each_possible_cpu(cpus)
		qdf_cpumask_set_cpu(cpus, &cpu_mask);

	cfg = &scn->irq_cpu_mask[grp_id][irq_num];
	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
	cfg->irq = irq;
	cfg->last_updated = 0;
	cfg->last_affined_away = 0;
	cfg->update_requested = false;
}
#endif

View File

@@ -421,6 +421,14 @@ struct hif_softc {
struct pld_shadow_reg_v3_cfg shadow_regs[MAX_SHADOW_REGS]; struct pld_shadow_reg_v3_cfg shadow_regs[MAX_SHADOW_REGS];
int num_shadow_registers_configured; int num_shadow_registers_configured;
#endif #endif
#ifdef WLAN_FEATURE_AFFINITY_MGR
/* CPU Affinity info of IRQs */
bool affinity_mgr_supported;
uint64_t time_threshold;
struct hif_cpu_affinity ce_irq_cpu_mask[CE_COUNT_MAX];
struct hif_cpu_affinity irq_cpu_mask[HIF_MAX_GROUP][HIF_MAX_GRP_IRQ];
qdf_cpu_mask allowed_mask;
#endif
}; };
#if defined(NUM_SOC_PERF_CLUSTER) && (NUM_SOC_PERF_CLUSTER > 1) #if defined(NUM_SOC_PERF_CLUSTER) && (NUM_SOC_PERF_CLUSTER > 1)

View File

@@ -577,14 +577,10 @@ void hif_ipci_irq_set_affinity_hint(struct hif_exec_context *hif_ext_group,
} }
for (i = 0; i < hif_ext_group->numirq; i++) { for (i = 0; i < hif_ext_group->numirq; i++) {
if (mask_set) { if (mask_set) {
qdf_dev_modify_irq_status(hif_ext_group->os_irq[i], ret = hif_affinity_mgr_set_qrg_irq_affinity((struct hif_softc *)hif_ext_group->hif,
IRQ_NO_BALANCING, 0); hif_ext_group->os_irq[i],
ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i], hif_ext_group->grp_id, i,
(struct qdf_cpu_mask *) &hif_ext_group->new_cpu_mask[i]);
&hif_ext_group->
new_cpu_mask[i]);
qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
0, IRQ_NO_BALANCING);
if (ret) if (ret)
qdf_debug("Set affinity %*pbl fails for IRQ %d ", qdf_debug("Set affinity %*pbl fails for IRQ %d ",
qdf_cpumask_pr_args(&hif_ext_group-> qdf_cpumask_pr_args(&hif_ext_group->
@@ -624,7 +620,7 @@ static void hif_ipci_ce_irq_set_affinity_hint(struct hif_softc *scn)
struct hif_ipci_softc *ipci_sc = HIF_GET_IPCI_SOFTC(scn); struct hif_ipci_softc *ipci_sc = HIF_GET_IPCI_SOFTC(scn);
struct CE_attr *host_ce_conf; struct CE_attr *host_ce_conf;
int ce_id; int ce_id;
qdf_cpu_mask ce_cpu_mask; qdf_cpu_mask ce_cpu_mask, updated_mask;
int perf_cpu_cluster = hif_get_perf_cluster_bitmap(); int perf_cpu_cluster = hif_get_perf_cluster_bitmap();
int package_id; int package_id;
@@ -645,16 +641,13 @@ static void hif_ipci_ce_irq_set_affinity_hint(struct hif_softc *scn)
for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR) if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
continue; continue;
qdf_cpumask_copy(&updated_mask, &ce_cpu_mask);
ret = hif_affinity_mgr_set_ce_irq_affinity(scn, ipci_sc->ce_msi_irq_num[ce_id],
ce_id,
&updated_mask);
qdf_cpumask_clear(&ipci_sc->ce_irq_cpu_mask[ce_id]); qdf_cpumask_clear(&ipci_sc->ce_irq_cpu_mask[ce_id]);
qdf_cpumask_copy(&ipci_sc->ce_irq_cpu_mask[ce_id], qdf_cpumask_copy(&ipci_sc->ce_irq_cpu_mask[ce_id],
&ce_cpu_mask); &updated_mask);
qdf_dev_modify_irq_status(ipci_sc->ce_msi_irq_num[ce_id],
IRQ_NO_BALANCING, 0);
ret = qdf_dev_set_irq_affinity(
ipci_sc->ce_msi_irq_num[ce_id],
(struct qdf_cpu_mask *)&ipci_sc->ce_irq_cpu_mask[ce_id]);
qdf_dev_modify_irq_status(ipci_sc->ce_msi_irq_num[ce_id],
0, IRQ_NO_BALANCING);
if (ret) if (ret)
hif_err_rl("Set affinity %*pbl fails for CE IRQ %d", hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
qdf_cpumask_pr_args( qdf_cpumask_pr_args(
@@ -690,14 +683,10 @@ void hif_ipci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]); qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]);
qdf_cpumask_clear_cpu(cpu, qdf_cpumask_clear_cpu(cpu,
&hif_ext_group->new_cpu_mask[i]); &hif_ext_group->new_cpu_mask[i]);
qdf_dev_modify_irq_status(hif_ext_group->os_irq[i], ret = hif_affinity_mgr_set_qrg_irq_affinity((struct hif_softc *)hif_ext_group->hif,
IRQ_NO_BALANCING, 0); hif_ext_group->os_irq[i],
ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i], hif_ext_group->grp_id, i,
(struct qdf_cpu_mask *) &hif_ext_group->new_cpu_mask[i]);
&hif_ext_group->
new_cpu_mask[i]);
qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
0, IRQ_NO_BALANCING);
if (ret) if (ret)
hif_err("Set affinity %*pbl fails for IRQ %d ", hif_err("Set affinity %*pbl fails for IRQ %d ",
qdf_cpumask_pr_args(&hif_ext_group-> qdf_cpumask_pr_args(&hif_ext_group->

View File

@@ -3071,6 +3071,8 @@ int hif_ce_msi_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
pci_sc->ce_irq_num[ce_id] = irq; pci_sc->ce_irq_num[ce_id] = irq;
hif_affinity_mgr_init_ce_irq(scn, ce_id, irq);
qdf_scnprintf(ce_irqname[pci_slot][ce_id], qdf_scnprintf(ce_irqname[pci_slot][ce_id],
DP_IRQ_NAME_LEN, "pci%u_wlan_ce_%u", DP_IRQ_NAME_LEN, "pci%u_wlan_ce_%u",
pci_slot, ce_id); pci_slot, ce_id);
@@ -3234,14 +3236,10 @@ void hif_pci_irq_set_affinity_hint(struct hif_exec_context *hif_ext_group,
} }
for (i = 0; i < hif_ext_group->numirq; i++) { for (i = 0; i < hif_ext_group->numirq; i++) {
if (mask_set) { if (mask_set) {
qdf_dev_modify_irq_status(hif_ext_group->os_irq[i], ret = hif_affinity_mgr_set_qrg_irq_affinity((struct hif_softc *)hif_ext_group->hif,
IRQ_NO_BALANCING, 0); hif_ext_group->os_irq[i],
ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i], hif_ext_group->grp_id, i,
(struct qdf_cpu_mask *) &hif_ext_group->new_cpu_mask[i]);
&hif_ext_group->
new_cpu_mask[i]);
qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
0, IRQ_NO_BALANCING);
if (ret) if (ret)
qdf_debug("Set affinity %*pbl fails for IRQ %d ", qdf_debug("Set affinity %*pbl fails for IRQ %d ",
qdf_cpumask_pr_args(&hif_ext_group-> qdf_cpumask_pr_args(&hif_ext_group->
@@ -3264,7 +3262,7 @@ void hif_pci_ce_irq_set_affinity_hint(struct hif_softc *scn)
struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn); struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
struct CE_attr *host_ce_conf; struct CE_attr *host_ce_conf;
int ce_id; int ce_id;
qdf_cpu_mask ce_cpu_mask; qdf_cpu_mask ce_cpu_mask, updated_mask;
int perf_cpu_cluster = hif_get_perf_cluster_bitmap(); int perf_cpu_cluster = hif_get_perf_cluster_bitmap();
int package_id; int package_id;
@@ -3288,16 +3286,13 @@ void hif_pci_ce_irq_set_affinity_hint(struct hif_softc *scn)
for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR) if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
continue; continue;
qdf_cpumask_copy(&updated_mask, &ce_cpu_mask);
ret = hif_affinity_mgr_set_ce_irq_affinity(scn, pci_sc->ce_irq_num[ce_id],
ce_id,
&updated_mask);
qdf_cpumask_clear(&pci_sc->ce_irq_cpu_mask[ce_id]); qdf_cpumask_clear(&pci_sc->ce_irq_cpu_mask[ce_id]);
qdf_cpumask_copy(&pci_sc->ce_irq_cpu_mask[ce_id], qdf_cpumask_copy(&pci_sc->ce_irq_cpu_mask[ce_id],
&ce_cpu_mask); &updated_mask);
qdf_dev_modify_irq_status(pci_sc->ce_irq_num[ce_id],
IRQ_NO_BALANCING, 0);
ret = qdf_dev_set_irq_affinity(
pci_sc->ce_irq_num[ce_id],
(struct qdf_cpu_mask *)&pci_sc->ce_irq_cpu_mask[ce_id]);
qdf_dev_modify_irq_status(pci_sc->ce_irq_num[ce_id],
0, IRQ_NO_BALANCING);
if (ret) if (ret)
hif_err_rl("Set affinity %*pbl fails for CE IRQ %d", hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
qdf_cpumask_pr_args( qdf_cpumask_pr_args(
@@ -3327,14 +3322,10 @@ void hif_pci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]); qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]);
qdf_cpumask_clear_cpu(cpu, qdf_cpumask_clear_cpu(cpu,
&hif_ext_group->new_cpu_mask[i]); &hif_ext_group->new_cpu_mask[i]);
qdf_dev_modify_irq_status(hif_ext_group->os_irq[i], ret = hif_affinity_mgr_set_qrg_irq_affinity((struct hif_softc *)hif_ext_group->hif,
IRQ_NO_BALANCING, 0); hif_ext_group->os_irq[i],
ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i], hif_ext_group->grp_id, i,
(struct qdf_cpu_mask *) &hif_ext_group->new_cpu_mask[i]);
&hif_ext_group->
new_cpu_mask[i]);
qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
0, IRQ_NO_BALANCING);
if (ret) if (ret)
hif_err("Set affinity %*pbl fails for IRQ %d ", hif_err("Set affinity %*pbl fails for IRQ %d ",
qdf_cpumask_pr_args(&hif_ext_group-> qdf_cpumask_pr_args(&hif_ext_group->
@@ -3494,6 +3485,8 @@ int hif_pci_configure_grp_irq(struct hif_softc *scn,
return -EFAULT; return -EFAULT;
} }
hif_ext_group->os_irq[j] = irq; hif_ext_group->os_irq[j] = irq;
hif_affinity_mgr_init_grp_irq(scn, hif_ext_group->grp_id,
j, irq);
} }
hif_ext_group->irq_requested = true; hif_ext_group->irq_requested = true;
return 0; return 0;