qcacmn: Use qdf_cpuhp APIs for NAPI

Abstract NAPI's use of CPU hotplug events by using the new QDF CPU
hotplug APIs.

Change-Id: Iad590768476b4dc6bad63b3ee3b1b1bbf7698251
CRs-Fixed: 2141180
Этот коммит содержится в:
Dustin Brown
2017-10-24 16:10:23 -07:00
коммит произвёл Sandeep Puligilla
родитель 519d203401
Коммит 5d0d104798
2 изменённых файлов: 74 добавлений и 89 удалений

Просмотреть файл

@@ -282,6 +282,7 @@ struct qca_napi_cpu {
* @lilcl_head:
* @bigcl_head:
* @napi_mode: irq affinity & clock voting mode
* @cpuhp_handler: CPU hotplug event registration handle
*/
struct qca_napi_data {
struct hif_softc *hif_softc;
@@ -297,8 +298,7 @@ struct qca_napi_data {
struct qca_napi_cpu napi_cpu[NR_CPUS];
int lilcl_head, bigcl_head;
enum qca_napi_tput_state napi_mode;
struct notifier_block hnc_cpu_notifier;
bool cpu_notifier_registered;
struct qdf_cpuhp_handler *cpuhp_handler;
uint8_t flags;
};

Просмотреть файл

@@ -53,6 +53,7 @@
#include <ce_api.h>
#include <ce_internal.h>
#include <hif_irq_affinity.h>
#include "qdf_cpuhp.h"
enum napi_decision_vector {
HIF_NAPI_NOEVENT = 0,
@@ -1052,48 +1053,52 @@ static int hnc_link_clusters(struct qca_napi_data *napid)
*/
/**
* hnc_cpu_notify_cb() - handles CPU hotplug events
* hnc_cpu_online_cb() - handles CPU hotplug "up" events
* @context: the associated HIF context
* @cpu: the CPU Id of the CPU the event happened on
*
* On transitions to online, we only handle the ONLINE event,
* and ignore the PREP events, because we don't want to act too
* early.
* On transition to offline, we act on PREP events, because
* we may need to move the irqs/NAPIs to another CPU before
* it is actually off-lined.
*
* Return: NOTIFY_OK (don't block action)
* Return: None
*/
static int hnc_cpu_notify_cb(struct notifier_block *nb,
unsigned long action,
void *hcpu)
static void hnc_cpu_online_cb(void *context, uint32_t cpu)
{
int rc = NOTIFY_OK;
unsigned long cpu = (unsigned long)hcpu;
struct hif_opaque_softc *hif;
struct qca_napi_data *napid = NULL;
struct hif_softc *hif = context;
struct qca_napi_data *napid = &hif->napi_data;
NAPI_DEBUG("-->%s(act=%ld, cpu=%ld)", __func__, action, cpu);
if (cpu >= NR_CPUS)
return;
napid = qdf_container_of(nb, struct qca_napi_data, hnc_cpu_notifier);
hif = &qdf_container_of(napid, struct hif_softc, napi_data)->osc;
NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu);
switch (action) {
case CPU_ONLINE:
napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
NAPI_DEBUG("%s: CPU %ld marked %d",
NAPI_DEBUG("%s: CPU %u marked %d",
__func__, cpu, napid->napi_cpu[cpu].state);
break;
case CPU_DEAD: /* already dead; we have marked it before, but ... */
case CPU_DEAD_FROZEN:
napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
NAPI_DEBUG("%s: CPU %ld marked %d",
__func__, cpu, napid->napi_cpu[cpu].state);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
NAPI_DEBUG("<--%s", __func__);
}
/**
* hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events
* @context: the associated HIF context
* @cpu: the CPU Id of the CPU the event happened on
*
* On transition to offline, we act on PREP events, because we may need to move
* the irqs/NAPIs to another CPU before it is actually off-lined.
*
* Return: None
*/
static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu)
{
struct hif_softc *hif = context;
struct qca_napi_data *napid = &hif->napi_data;
if (cpu >= NR_CPUS)
return;
NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu);
napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
NAPI_DEBUG("%s: CPU %ld marked %d; updating affinity",
NAPI_DEBUG("%s: CPU %u marked %d; updating affinity",
__func__, cpu, napid->napi_cpu[cpu].state);
/**
@@ -1101,58 +1106,38 @@ static int hnc_cpu_notify_cb(struct notifier_block *nb,
* if we are in LO throughput mode, then this is valid
* if the CPU is the low designated CPU.
*/
hif_napi_event(hif,
hif_napi_event(GET_HIF_OPAQUE_HDL(hif),
NAPI_EVT_CPU_STATE,
(void *)
((cpu << 16) | napid->napi_cpu[cpu].state));
break;
default:
NAPI_DEBUG("%s: ignored. action: %ld", __func__, action);
break;
} /* switch */
NAPI_DEBUG("<--%s [%d]", __func__, rc);
return rc;
((size_t)cpu << 16 | napid->napi_cpu[cpu].state));
NAPI_DEBUG("<--%s", __func__);
}
/**
* hnc_hotplug_hook() - installs a hotplug notifier
* @hif_sc: hif_sc context
* @register: !0 => register , =0 => deregister
*
* Because the callback relies on the data layout of
* struct hif_softc & its napi_data member, this callback
* registration requires that the hif_softc is passed in.
*
* Note that this is different from the cpu notifier used by
* rx_thread (cds_schedule.c).
* We may consider combining these modifiers in the future.
*
* Return: 0: success
* <0: error
*/
static int hnc_hotplug_hook(struct hif_softc *hif_sc, int install)
static int hnc_hotplug_register(struct hif_softc *hif_sc)
{
int rc = 0;
QDF_STATUS status;
NAPI_DEBUG("-->%s(%d)", __func__, install);
NAPI_DEBUG("-->%s", __func__);
if (install) {
hif_sc->napi_data.hnc_cpu_notifier.notifier_call
= hnc_cpu_notify_cb;
rc = register_hotcpu_notifier(
&hif_sc->napi_data.hnc_cpu_notifier);
if (rc == 0)
hif_sc->napi_data.cpu_notifier_registered = true;
} else {
if (hif_sc->napi_data.cpu_notifier_registered == true) {
unregister_hotcpu_notifier(
&hif_sc->napi_data.hnc_cpu_notifier);
hif_sc->napi_data.cpu_notifier_registered = false;
}
status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler,
hif_sc,
hnc_cpu_online_cb,
hnc_cpu_before_offline_cb);
NAPI_DEBUG("<--%s [%d]", __func__, status);
return qdf_status_to_os_return(status);
}
NAPI_DEBUG("<--%s()[%d]", __func__, rc);
return rc;
/**
 * hnc_hotplug_unregister() - uninstalls the CPU hotplug event handler
 * @hif_sc: hif context whose napi_data holds the registration handle
 *
 * Unregisters the QDF CPU hotplug handler previously installed for this
 * hif context, if one is present; a NULL cpuhp_handler means registration
 * never happened (or already failed), so unregistration is skipped.
 *
 * NOTE(review): qdf_cpuhp_unregister() takes the handler by address —
 * presumably it also clears *cpuhp_handler to NULL, making repeated calls
 * safe; confirm against the qdf_cpuhp implementation.
 *
 * Return: None
 */
static void hnc_hotplug_unregister(struct hif_softc *hif_sc)
{
	NAPI_DEBUG("-->%s", __func__);

	/* only unregister if a handler was actually registered */
	if (hif_sc->napi_data.cpuhp_handler)
		qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler);

	NAPI_DEBUG("<--%s", __func__);
}
/**
@@ -1245,7 +1230,7 @@ int hif_napi_cpu_init(struct hif_opaque_softc *hif)
goto lab_err_topology;
/* install hotplug notifier */
rc = hnc_hotplug_hook(HIF_GET_SOFTC(hif), 1);
rc = hnc_hotplug_register(HIF_GET_SOFTC(hif));
if (0 != rc)
goto lab_err_hotplug;
@@ -1256,7 +1241,7 @@ int hif_napi_cpu_init(struct hif_opaque_softc *hif)
lab_err_hotplug:
hnc_tput_hook(0);
hnc_hotplug_hook(HIF_GET_SOFTC(hif), 0);
hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
lab_err_topology:
memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
lab_rss_init:
@@ -1283,7 +1268,7 @@ int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
rc = hnc_tput_hook(0);
/* uninstall hotplug notifier */
rc = hnc_hotplug_hook(HIF_GET_SOFTC(hif), 0);
hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
/* clear the topology table */
memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);