
qcacmn: Use qdf_cpuhp APIs for NAPI

Abstract NAPI's use of CPU hotplug events by using the new QDF CPU
hotplug APIs.

Change-Id: Iad590768476b4dc6bad63b3ee3b1b1bbf7698251
CRs-Fixed: 2141180
Dustin Brown 7 years ago
parent
commit
5d0d104798
2 files changed, 74 insertions(+), 89 deletions(-)
  1. hif/inc/hif.h (+2 −2)
  2. hif/src/hif_napi.c (+72 −87)
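
For context, the sketch below reconstructs the qdf_cpuhp surface this change relies on. The signatures are inferred from the call sites in the diff that follows, not copied from the header; qdf_cpuhp.h remains the authoritative reference.

#include "qdf_status.h"

/* Sketch of the assumed qdf_cpuhp interface (inferred, not verbatim) */
struct qdf_cpuhp_handler;

/* Invoked with the registered context and the id of the hotplugged CPU */
typedef void (*qdf_cpuhp_callback)(void *context, uint32_t cpu);

/*
 * Allocates a handler, stores it in *out_handler, and arranges for
 * up_callback/down_callback to run on CPU online/pre-offline events
 */
QDF_STATUS qdf_cpuhp_register(struct qdf_cpuhp_handler **out_handler,
			      void *context,
			      qdf_cpuhp_callback up_callback,
			      qdf_cpuhp_callback down_callback);

/* Tears down the registration and resets *out_handler to NULL */
void qdf_cpuhp_unregister(struct qdf_cpuhp_handler **out_handler);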

hif/inc/hif.h (+2 −2)

@@ -282,6 +282,7 @@ struct qca_napi_cpu {
  * @lilcl_head:
  * @bigcl_head:
  * @napi_mode: irq affinity & clock voting mode
+ * @cpuhp_handler: CPU hotplug event registration handle
  */
 struct qca_napi_data {
 	struct               hif_softc *hif_softc;
@@ -297,8 +298,7 @@ struct qca_napi_data {
 	struct qca_napi_cpu  napi_cpu[NR_CPUS];
 	int                  lilcl_head, bigcl_head;
 	enum qca_napi_tput_state napi_mode;
-	struct notifier_block hnc_cpu_notifier;
-	bool cpu_notifier_registered;
+	struct qdf_cpuhp_handler *cpuhp_handler;
 	uint8_t              flags;
 };
 

hif/src/hif_napi.c (+72 −87)

@@ -53,6 +53,7 @@
 #include <ce_api.h>
 #include <ce_internal.h>
 #include <hif_irq_affinity.h>
+#include "qdf_cpuhp.h"
 
 enum napi_decision_vector {
 	HIF_NAPI_NOEVENT = 0,
@@ -1052,107 +1053,91 @@ static int hnc_link_clusters(struct qca_napi_data *napid)
  */
 
 /**
- * hnc_cpu_notify_cb() - handles CPU hotplug events
+ * hnc_cpu_online_cb() - handles CPU hotplug "up" events
+ * @context: the associated HIF context
+ * @cpu: the CPU Id of the CPU the event happened on
  *
- * On transitions to online, we onlu handle the ONLINE event,
- * and ignore the PREP events, because we dont want to act too
- * early.
- * On transtion to offline, we act on PREP events, because
- * we may need to move the irqs/NAPIs to another CPU before
- * it is actually off-lined.
- *
- * Return: NOTIFY_OK (dont block action)
+ * Return: None
  */
-static int hnc_cpu_notify_cb(struct notifier_block *nb,
-			     unsigned long          action,
-			     void                  *hcpu)
+static void hnc_cpu_online_cb(void *context, uint32_t cpu)
 {
-	int rc = NOTIFY_OK;
-	unsigned long cpu = (unsigned long)hcpu;
-	struct hif_opaque_softc *hif;
-	struct qca_napi_data *napid = NULL;
-
-	NAPI_DEBUG("-->%s(act=%ld, cpu=%ld)", __func__, action, cpu);
+	struct hif_softc *hif = context;
+	struct qca_napi_data *napid = &hif->napi_data;
 
-	napid = qdf_container_of(nb, struct qca_napi_data, hnc_cpu_notifier);
-	hif = &qdf_container_of(napid, struct hif_softc, napi_data)->osc;
+	if (cpu >= NR_CPUS)
+		return;
 
-	switch (action) {
-	case CPU_ONLINE:
-		napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
-		NAPI_DEBUG("%s: CPU %ld marked %d",
-			   __func__, cpu, napid->napi_cpu[cpu].state);
-		break;
-	case CPU_DEAD: /* already dead; we have marked it before, but ... */
-	case CPU_DEAD_FROZEN:
-		napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
-		NAPI_DEBUG("%s: CPU %ld marked %d",
-			   __func__, cpu, napid->napi_cpu[cpu].state);
-		break;
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
+	NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu);
 
-		NAPI_DEBUG("%s: CPU %ld marked %d; updating affinity",
-			   __func__, cpu, napid->napi_cpu[cpu].state);
+	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
+	NAPI_DEBUG("%s: CPU %u marked %d",
+		   __func__, cpu, napid->napi_cpu[cpu].state);
 
-		/**
-		 * we need to move any NAPIs on this CPU out.
-		 * if we are in LO throughput mode, then this is valid
-		 * if the CPU is the the low designated CPU.
-		 */
-		hif_napi_event(hif,
-			       NAPI_EVT_CPU_STATE,
-			       (void *)
-			       ((cpu << 16) | napid->napi_cpu[cpu].state));
-		break;
-	default:
-		NAPI_DEBUG("%s: ignored. action: %ld", __func__, action);
-		break;
-	} /* switch */
-	NAPI_DEBUG("<--%s [%d]", __func__, rc);
-	return rc;
+	NAPI_DEBUG("<--%s", __func__);
 }
 
 /**
- * hnc_hotplug_hook() - installs a hotplug notifier
- * @hif_sc: hif_sc context
- * @register: !0 => register , =0 => deregister
+ * hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events
+ * @context: the associated HIF context
+ * @cpu: the CPU Id of the CPU the event happened on
  *
- * Because the callback relies on the data layout of
- * struct hif_softc & its napi_data member, this callback
- * registration requires that the hif_softc is passed in.
+ * On transition to offline, we act on the "before offline" event, because we
+ * may need to move the irqs/NAPIs to another CPU before it is actually
+ * off-lined.
  *
- * Note that this is different from the cpu notifier used by
- * rx_thread (cds_schedule.c).
- * We may consider combining these modifiers in the future.
- *
- * Return: 0: success
- *        <0: error
+ * Return: None
  */
-static int hnc_hotplug_hook(struct hif_softc *hif_sc, int install)
+static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu)
 {
-	int rc = 0;
+	struct hif_softc *hif = context;
+	struct qca_napi_data *napid = &hif->napi_data;
 
-	NAPI_DEBUG("-->%s(%d)", __func__, install);
+	if (cpu >= NR_CPUS)
+		return;
 
-	if (install) {
-		hif_sc->napi_data.hnc_cpu_notifier.notifier_call
-			= hnc_cpu_notify_cb;
-		rc = register_hotcpu_notifier(
-			&hif_sc->napi_data.hnc_cpu_notifier);
-		if (rc == 0)
-			hif_sc->napi_data.cpu_notifier_registered = true;
-	} else {
-		if (hif_sc->napi_data.cpu_notifier_registered == true) {
-			unregister_hotcpu_notifier(
-				&hif_sc->napi_data.hnc_cpu_notifier);
-			hif_sc->napi_data.cpu_notifier_registered = false;
-		}
-	}
+	NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu);
 
-	NAPI_DEBUG("<--%s()[%d]", __func__, rc);
-	return rc;
+	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
+
+	NAPI_DEBUG("%s: CPU %u marked %d; updating affinity",
+		   __func__, cpu, napid->napi_cpu[cpu].state);
+
+	/*
+	 * We need to move any NAPIs on this CPU out.
+	 * If we are in LO throughput mode, then this is valid
+	 * only if the CPU is the designated low CPU.
+	 */
+	hif_napi_event(GET_HIF_OPAQUE_HDL(hif),
+		       NAPI_EVT_CPU_STATE,
+		       (void *)
+		       ((size_t)cpu << 16 | napid->napi_cpu[cpu].state));
+
+	NAPI_DEBUG("<--%s", __func__);
+}
+
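+/**
+ * hnc_hotplug_register() - register CPU hotplug callbacks via qdf_cpuhp
+ * @hif_sc: hif_sc context
+ *
+ * Return: 0 on success, <0 on error
+ */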
+static int hnc_hotplug_register(struct hif_softc *hif_sc)
+{
+	QDF_STATUS status;
+
+	NAPI_DEBUG("-->%s", __func__);
+
+	status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler,
+				    hif_sc,
+				    hnc_cpu_online_cb,
+				    hnc_cpu_before_offline_cb);
+
+	NAPI_DEBUG("<--%s [%d]", __func__, status);
+
+	return qdf_status_to_os_return(status);
+}
+
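+/**
+ * hnc_hotplug_unregister() - unregister CPU hotplug callbacks from qdf_cpuhp
+ * @hif_sc: hif_sc context
+ *
+ * Return: None
+ */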
+static void hnc_hotplug_unregister(struct hif_softc *hif_sc)
+{
+	NAPI_DEBUG("-->%s", __func__);
+
+	if (hif_sc->napi_data.cpuhp_handler)
+		qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler);
+
+	NAPI_DEBUG("<--%s", __func__);
 }
 
 /**
@@ -1245,7 +1230,7 @@ int hif_napi_cpu_init(struct hif_opaque_softc *hif)
 		goto lab_err_topology;
 
 	/* install hotplug notifier */
-	rc = hnc_hotplug_hook(HIF_GET_SOFTC(hif), 1);
+	rc = hnc_hotplug_register(HIF_GET_SOFTC(hif));
 	if (0 != rc)
 		goto lab_err_hotplug;
 
@@ -1256,7 +1241,7 @@ int hif_napi_cpu_init(struct hif_opaque_softc *hif)
 
 lab_err_hotplug:
 	hnc_tput_hook(0);
-	hnc_hotplug_hook(HIF_GET_SOFTC(hif), 0);
+	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
 lab_err_topology:
 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
 lab_rss_init:
@@ -1283,7 +1268,7 @@ int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
 	rc = hnc_tput_hook(0);
 
 	/* uninstall hotplug notifier */
-	rc = hnc_hotplug_hook(HIF_GET_SOFTC(hif), 0);
+	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
 
 	/* clear the topology table */
 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
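
As a side note, the "before offline" callback packs the CPU id and its new state into the opaque event payload as ((size_t)cpu << 16 | state). The standalone sketch below shows a hypothetical decoder for that encoding; decode_cpu_state() and the 0xffff mask are illustrative assumptions, since the actual NAPI_EVT_CPU_STATE handler in hif_napi_event() is not part of this diff.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Hypothetical inverse of the payload built in hnc_cpu_before_offline_cb():
 * the CPU id occupies the bits above 16, the qca_napi_cpu state the bits
 * below.
 */
static void decode_cpu_state(void *data)
{
	uint32_t cpu   = (uint32_t)((size_t)data >> 16);
	uint32_t state = (uint32_t)((size_t)data & 0xffff);

	printf("cpu=%u state=%u\n", cpu, state);
}

int main(void)
{
	uint32_t cpu = 3, state = 2; /* e.g. a "down" state value */

	/* Same packing expression as in the down callback above */
	decode_cpu_state((void *)((size_t)cpu << 16 | state));
	return 0;
}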