qcacmn: Use qdf_cpuhp APIs for NAPI

Abstract NAPI's use of CPU hotplug events by using the new QDF CPU
hotplug APIs.

Change-Id: Iad590768476b4dc6bad63b3ee3b1b1bbf7698251
CRs-Fixed: 2141180
This commit is contained in:
Dustin Brown
2017-10-24 16:10:23 -07:00
committed by Sandeep Puligilla
parent 519d203401
commit 5d0d104798
2 changed files with 74 additions and 89 deletions

View File

@@ -282,6 +282,7 @@ struct qca_napi_cpu {
* @lilcl_head: * @lilcl_head:
* @bigcl_head: * @bigcl_head:
* @napi_mode: irq affinity & clock voting mode * @napi_mode: irq affinity & clock voting mode
* @cpuhp_handler: CPU hotplug event registration handle
*/ */
struct qca_napi_data { struct qca_napi_data {
struct hif_softc *hif_softc; struct hif_softc *hif_softc;
@@ -297,8 +298,7 @@ struct qca_napi_data {
struct qca_napi_cpu napi_cpu[NR_CPUS]; struct qca_napi_cpu napi_cpu[NR_CPUS];
int lilcl_head, bigcl_head; int lilcl_head, bigcl_head;
enum qca_napi_tput_state napi_mode; enum qca_napi_tput_state napi_mode;
struct notifier_block hnc_cpu_notifier; struct qdf_cpuhp_handler *cpuhp_handler;
bool cpu_notifier_registered;
uint8_t flags; uint8_t flags;
}; };

View File

@@ -53,6 +53,7 @@
#include <ce_api.h> #include <ce_api.h>
#include <ce_internal.h> #include <ce_internal.h>
#include <hif_irq_affinity.h> #include <hif_irq_affinity.h>
#include "qdf_cpuhp.h"
enum napi_decision_vector { enum napi_decision_vector {
HIF_NAPI_NOEVENT = 0, HIF_NAPI_NOEVENT = 0,
@@ -1052,107 +1053,91 @@ static int hnc_link_clusters(struct qca_napi_data *napid)
*/ */
/** /**
* hnc_cpu_notify_cb() - handles CPU hotplug events * hnc_cpu_online_cb() - handles CPU hotplug "up" events
* @context: the associated HIF context
* @cpu: the CPU Id of the CPU the event happened on
* *
 * On transitions to online, we only handle the ONLINE event, * Return: None
 * and ignore the PREP events, because we don't want to act too
* early.
 * On transition to offline, we act on PREP events, because
* we may need to move the irqs/NAPIs to another CPU before
* it is actually off-lined.
*
 * Return: NOTIFY_OK (don't block action)
*/ */
static int hnc_cpu_notify_cb(struct notifier_block *nb, static void hnc_cpu_online_cb(void *context, uint32_t cpu)
unsigned long action,
void *hcpu)
{ {
int rc = NOTIFY_OK; struct hif_softc *hif = context;
unsigned long cpu = (unsigned long)hcpu; struct qca_napi_data *napid = &hif->napi_data;
struct hif_opaque_softc *hif;
struct qca_napi_data *napid = NULL;
NAPI_DEBUG("-->%s(act=%ld, cpu=%ld)", __func__, action, cpu); if (cpu >= NR_CPUS)
return;
napid = qdf_container_of(nb, struct qca_napi_data, hnc_cpu_notifier); NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu);
hif = &qdf_container_of(napid, struct hif_softc, napi_data)->osc;
switch (action) { napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
case CPU_ONLINE: NAPI_DEBUG("%s: CPU %u marked %d",
napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP; __func__, cpu, napid->napi_cpu[cpu].state);
NAPI_DEBUG("%s: CPU %ld marked %d",
__func__, cpu, napid->napi_cpu[cpu].state);
break;
case CPU_DEAD: /* already dead; we have marked it before, but ... */
case CPU_DEAD_FROZEN:
napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
NAPI_DEBUG("%s: CPU %ld marked %d",
__func__, cpu, napid->napi_cpu[cpu].state);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
NAPI_DEBUG("%s: CPU %ld marked %d; updating affinity", NAPI_DEBUG("<--%s", __func__);
__func__, cpu, napid->napi_cpu[cpu].state);
/**
* we need to move any NAPIs on this CPU out.
* if we are in LO throughput mode, then this is valid
 * if the CPU is the low designated CPU.
*/
hif_napi_event(hif,
NAPI_EVT_CPU_STATE,
(void *)
((cpu << 16) | napid->napi_cpu[cpu].state));
break;
default:
NAPI_DEBUG("%s: ignored. action: %ld", __func__, action);
break;
} /* switch */
NAPI_DEBUG("<--%s [%d]", __func__, rc);
return rc;
} }
/** /**
* hnc_hotplug_hook() - installs a hotplug notifier * hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events
* @hif_sc: hif_sc context * @context: the associated HIF context
* @register: !0 => register , =0 => deregister * @cpu: the CPU Id of the CPU the event happened on
* *
 * Because the callback relies on the data layout of * On transition to offline, we act on PREP events, because we may need to move
* struct hif_softc & its napi_data member, this callback * the irqs/NAPIs to another CPU before it is actually off-lined.
* registration requires that the hif_softc is passed in.
* *
* Note that this is different from the cpu notifier used by * Return: None
* rx_thread (cds_schedule.c).
 * We may consider combining these notifiers in the future.
*
* Return: 0: success
* <0: error
*/ */
static int hnc_hotplug_hook(struct hif_softc *hif_sc, int install) static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu)
{ {
int rc = 0; struct hif_softc *hif = context;
struct qca_napi_data *napid = &hif->napi_data;
NAPI_DEBUG("-->%s(%d)", __func__, install); if (cpu >= NR_CPUS)
return;
if (install) { NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu);
hif_sc->napi_data.hnc_cpu_notifier.notifier_call
= hnc_cpu_notify_cb;
rc = register_hotcpu_notifier(
&hif_sc->napi_data.hnc_cpu_notifier);
if (rc == 0)
hif_sc->napi_data.cpu_notifier_registered = true;
} else {
if (hif_sc->napi_data.cpu_notifier_registered == true) {
unregister_hotcpu_notifier(
&hif_sc->napi_data.hnc_cpu_notifier);
hif_sc->napi_data.cpu_notifier_registered = false;
}
}
NAPI_DEBUG("<--%s()[%d]", __func__, rc); napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
return rc;
NAPI_DEBUG("%s: CPU %u marked %d; updating affinity",
__func__, cpu, napid->napi_cpu[cpu].state);
/**
* we need to move any NAPIs on this CPU out.
* if we are in LO throughput mode, then this is valid
 * if the CPU is the low designated CPU.
*/
hif_napi_event(GET_HIF_OPAQUE_HDL(hif),
NAPI_EVT_CPU_STATE,
(void *)
((size_t)cpu << 16 | napid->napi_cpu[cpu].state));
NAPI_DEBUG("<--%s", __func__);
}
static int hnc_hotplug_register(struct hif_softc *hif_sc)
{
QDF_STATUS status;
NAPI_DEBUG("-->%s", __func__);
status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler,
hif_sc,
hnc_cpu_online_cb,
hnc_cpu_before_offline_cb);
NAPI_DEBUG("<--%s [%d]", __func__, status);
return qdf_status_to_os_return(status);
}
static void hnc_hotplug_unregister(struct hif_softc *hif_sc)
{
NAPI_DEBUG("-->%s", __func__);
if (hif_sc->napi_data.cpuhp_handler)
qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler);
NAPI_DEBUG("<--%s", __func__);
} }
/** /**
@@ -1245,7 +1230,7 @@ int hif_napi_cpu_init(struct hif_opaque_softc *hif)
goto lab_err_topology; goto lab_err_topology;
/* install hotplug notifier */ /* install hotplug notifier */
rc = hnc_hotplug_hook(HIF_GET_SOFTC(hif), 1); rc = hnc_hotplug_register(HIF_GET_SOFTC(hif));
if (0 != rc) if (0 != rc)
goto lab_err_hotplug; goto lab_err_hotplug;
@@ -1256,7 +1241,7 @@ int hif_napi_cpu_init(struct hif_opaque_softc *hif)
lab_err_hotplug: lab_err_hotplug:
hnc_tput_hook(0); hnc_tput_hook(0);
hnc_hotplug_hook(HIF_GET_SOFTC(hif), 0); hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
lab_err_topology: lab_err_topology:
memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS); memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
lab_rss_init: lab_rss_init:
@@ -1283,7 +1268,7 @@ int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
rc = hnc_tput_hook(0); rc = hnc_tput_hook(0);
/* uninstall hotplug notifier */ /* uninstall hotplug notifier */
rc = hnc_hotplug_hook(HIF_GET_SOFTC(hif), 0); hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
/* clear the topology table */ /* clear the topology table */
memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS); memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);