qcacmn: Set IRQ affinity for CE interrupts to gold cores

When CE tasklets and NET_RX get scheduled on the same CPU,
NET_RX throttles the CE tasklets, resulting in WMI timeouts.

To avoid this, affine the CE IRQs to the gold cores so that
the CE tasklets get enough CPU time.

Change-Id: Ided81a0565958aca6611eba911824c3485eca472
CRs-Fixed: 2933335
This commit is contained in:
Karthik Kantamneni
2021-04-28 13:41:29 +05:30
committed by Madan Koyyalamudi
parent d86c555e6a
commit 2720cae884
4 changed files with 81 additions and 1 deletion

Melihat File

@@ -250,6 +250,18 @@ const char *hif_ipci_get_irq_name(int irq_no);
*/
int hif_ipci_enable_grp_irqs(struct hif_softc *scn);
#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_ipci_config_irq_affinity() - set the irq affinity
 * @scn: hif context
 *
 * Set the IRQ affinity hint for WLAN IRQs to gold cores only,
 * for defconfig builds.
 *
 * Return: none
 */
void hif_ipci_config_irq_affinity(struct hif_softc *scn);
#endif
/**
* hif_ipci_disable_grp_irqs(): disable grp IRQs
* @scn: struct hif_softc

Melihat File

@@ -77,8 +77,13 @@ QDF_STATUS hif_initialize_ipci_ops(struct hif_softc *hif_sc)
&hif_ipci_clear_stats;
bus_ops->hif_addr_in_boundary = &hif_dummy_addr_in_boundary;
bus_ops->hif_needs_bmi = &hif_ipci_needs_bmi;
#ifdef HIF_CPU_PERF_AFFINE_MASK
bus_ops->hif_config_irq_affinity =
&hif_ipci_config_irq_affinity;
#else
bus_ops->hif_config_irq_affinity =
&hif_dummy_config_irq_affinity;
#endif
bus_ops->hif_config_irq_by_ceid = &hif_dummy_config_irq_by_ceid;
bus_ops->hif_log_bus_info = &hif_dummy_log_bus_info;
bus_ops->hif_enable_grp_irqs = hif_ipci_enable_grp_irqs;

Melihat File

@@ -558,6 +558,65 @@ const char *hif_ipci_get_irq_name(int irq_no)
return "pci-dummy";
}
#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_ipci_ce_irq_set_affinity_hint() - affine CE IRQs to perf cluster cores
 * @scn: hif context
 *
 * Build a cpumask of the online CPUs that belong to the performance
 * (gold) cluster and apply it as the affinity of every CE MSI IRQ
 * whose interrupt is enabled. IRQ_NO_BALANCING is dropped around the
 * qdf_dev_set_irq_affinity() call so that the affinity request is not
 * rejected for a no-balance IRQ, and is restored afterwards.
 *
 * Return: none
 */
static void hif_ipci_ce_irq_set_affinity_hint(struct hif_softc *scn)
{
	int ret;
	unsigned int cpus;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
	struct hif_ipci_softc *ipci_sc = HIF_GET_IPCI_SOFTC(scn);
	struct CE_attr *host_ce_conf;
	int ce_id;
	qdf_cpu_mask ce_cpu_mask;

	host_ce_conf = ce_sc->host_ce_config;

	/* Collect all online CPUs that sit in the perf (gold) cluster */
	qdf_cpumask_clear(&ce_cpu_mask);
	qdf_for_each_online_cpu(cpus) {
		if (qdf_topology_physical_package_id(cpus) ==
		    CPU_CLUSTER_TYPE_PERF)
			qdf_cpumask_set_cpu(cpus, &ce_cpu_mask);
	}

	if (qdf_cpumask_empty(&ce_cpu_mask)) {
		hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity");
		return;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
			continue;

		/*
		 * qdf_cpumask_copy() overwrites the entire destination
		 * mask, so no separate clear of ce_irq_cpu_mask[ce_id]
		 * is needed beforehand.
		 */
		qdf_cpumask_copy(&ipci_sc->ce_irq_cpu_mask[ce_id],
				 &ce_cpu_mask);

		/* Temporarily allow balancing so the affinity sticks */
		qdf_dev_modify_irq_status(ipci_sc->ce_msi_irq_num[ce_id],
					  IRQ_NO_BALANCING, 0);
		ret = qdf_dev_set_irq_affinity(
			ipci_sc->ce_msi_irq_num[ce_id],
			(struct qdf_cpu_mask *)
				&ipci_sc->ce_irq_cpu_mask[ce_id]);
		qdf_dev_modify_irq_status(ipci_sc->ce_msi_irq_num[ce_id],
					  0, IRQ_NO_BALANCING);

		if (ret)
			hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
				   qdf_cpumask_pr_args(
					&ipci_sc->ce_irq_cpu_mask[ce_id]),
				   ipci_sc->ce_msi_irq_num[ce_id]);
		else
			hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
				     qdf_cpumask_pr_args(
					&ipci_sc->ce_irq_cpu_mask[ce_id]),
				     ipci_sc->ce_msi_irq_num[ce_id]);
	}
}
/**
 * hif_ipci_config_irq_affinity() - set the irq affinity
 * @scn: hif context
 *
 * Request a core-control boost and then affine the CE interrupt
 * IRQs to the performance (gold) cluster cores.
 *
 * Return: none
 */
void hif_ipci_config_irq_affinity(struct hif_softc *scn)
{
	/* NOTE(review): presumably raises CPU perf state - see
	 * hif_core_ctl_set_boost() for the exact semantics.
	 */
	hif_core_ctl_set_boost(true);
	/* Set IRQ affinity for CE interrupts */
	hif_ipci_ce_irq_set_affinity_hint(scn);
}
#endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
int hif_ipci_configure_grp_irq(struct hif_softc *scn,
struct hif_exec_context *hif_ext_group)
{

Melihat File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -86,6 +86,10 @@ struct hif_ipci_softc {
#ifdef FORCE_WAKE
struct hif_ipci_stats stats;
#endif
#ifdef HIF_CPU_PERF_AFFINE_MASK
/* Stores the affinity hint mask for each CE IRQ */
qdf_cpu_mask ce_irq_cpu_mask[CE_COUNT_MAX];
#endif
};
int hif_configure_irq(struct hif_softc *sc);