Browse source code

qcacld-3.0: Use qdf_cpuhp APIs for CDS

Abstract CDS's use of CPU hotplug events by using the new QDF CPU
hotplug APIs.

Change-Id: Id37ef9028f710a55938539c8e00bb63ffeb86495
CRs-Fixed: 2139720
Dustin Brown, 7 years ago
parent
commit
24d91177c2
2 changed files with 74 additions and 75 deletions
  1. 3 2
      core/cds/inc/cds_sched.h
  2. 71 73
      core/cds/src/cds_sched.c

+ 3 - 2
core/cds/inc/cds_sched.h

@@ -49,6 +49,7 @@
 #include "qdf_lock.h"
 #include "qdf_mc_timer.h"
 #include "cds_config.h"
+#include "qdf_cpuhp.h"
 
 #define TX_POST_EVENT               0x001
 #define TX_SUSPEND_EVENT            0x002
@@ -142,8 +143,8 @@ typedef struct _cds_sched_context {
 	/* Free message queue for OL Rx processing */
 	struct list_head cds_ol_rx_pkt_freeq;
 
-	/* cpu hotplug notifier */
-	struct notifier_block *cpu_hot_plug_notifier;
+	/* The CPU hotplug event registration handle, used to unregister */
+	struct qdf_cpuhp_handler *cpuhp_event_handle;
 
 	/* affinity lock */
 	struct mutex affinity_lock;

+ 71 - 73
core/cds/src/cds_sched.c

@@ -89,7 +89,7 @@ enum notifier_state {
 static p_cds_sched_context gp_cds_sched_context;
 #ifdef QCA_CONFIG_SMP
 static int cds_ol_rx_thread(void *arg);
-static unsigned long affine_cpu;
+static uint32_t affine_cpu;
 static QDF_STATUS cds_alloc_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext);
 
 #define CDS_CORE_PER_CLUSTER (4)
@@ -329,60 +329,66 @@ int cds_sched_handle_throughput_req(bool high_tput_required)
 	return 0;
 }
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
 /**
- * cds_cpu_hotplug_notify() - hot plug notify
- * @block: Pointer to block
- * @state: State
- * @hcpu: Pointer to hotplug cpu
+ * cds_cpu_hotplug_multi_cluster() - calls the multi-cluster hotplug handler,
+ *	when on a multi-cluster platform
  *
- * Return: NOTIFY_OK
+ * Return: QDF_STATUS
  */
-static int
-__cds_cpu_hotplug_notify(struct notifier_block *block,
-		       unsigned long state, void *hcpu)
+static QDF_STATUS cds_cpu_hotplug_multi_cluster(void)
 {
-	unsigned long cpu = (unsigned long)hcpu;
-	unsigned long pref_cpu = 0;
-	p_cds_sched_context pSchedContext = get_cds_sched_ctxt();
-	int i;
-	unsigned int multi_cluster;
-	unsigned int num_cpus;
-#if defined(WLAN_OPEN_SOURCE) && \
-	 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
 	int cpus;
-#endif
+	unsigned int multi_cluster = 0;
 
+	for_each_online_cpu(cpus) {
+		multi_cluster = topology_physical_package_id(cpus);
+	}
 
-	if ((NULL == pSchedContext) || (NULL == pSchedContext->ol_rx_thread))
-		return NOTIFY_OK;
+	if (!multi_cluster)
+		return QDF_STATUS_E_NOSUPPORT;
 
-	if (cds_is_load_or_unload_in_progress())
-		return NOTIFY_OK;
+	if (cds_sched_handle_cpu_hot_plug())
+		return QDF_STATUS_E_FAILURE;
 
-	num_cpus = num_possible_cpus();
-	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_LOW,
-		  "%s: RX CORE %d, STATE %d, NUM CPUS %d",
-		  __func__, (int)affine_cpu, (int)state, num_cpus);
-	multi_cluster = 0;
+	return QDF_STATUS_SUCCESS;
+}
+#else
+static QDF_STATUS cds_cpu_hotplug_multi_cluster(void)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+#endif /* KERNEL_VERSION(3, 10, 0) */
 
-#if defined(WLAN_OPEN_SOURCE) && \
-	 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+/**
+ * __cds_cpu_hotplug_notify() - CPU hotplug event handler
+ * @cpu: CPU Id of the CPU generating the event
+ * @cpu_up: true if the CPU is online
+ *
+ * Return: None
+ */
+static void __cds_cpu_hotplug_notify(uint32_t cpu, bool cpu_up)
+{
+	unsigned long pref_cpu = 0;
+	p_cds_sched_context pSchedContext = get_cds_sched_ctxt();
+	int i;
 
-	for_each_online_cpu(cpus) {
-		multi_cluster =  topology_physical_package_id(cpus);
-	}
-#endif
+	if (!pSchedContext || !pSchedContext->ol_rx_thread)
+		return;
 
-	if ((multi_cluster) &&
-		((CPU_ONLINE == state) || (CPU_DEAD == state))) {
-		cds_sched_handle_cpu_hot_plug();
-		return NOTIFY_OK;
-	}
+	if (cds_is_load_or_unload_in_progress())
+		return;
+
+	cds_debug("'%s' event on CPU %u (of %d); Currently affine to CPU %u",
+		  cpu_up ? "Up" : "Down", cpu, num_possible_cpus(), affine_cpu);
+
+	/* try multi-cluster scheduling first */
+	if (QDF_IS_STATUS_SUCCESS(cds_cpu_hotplug_multi_cluster()))
+		return;
 
-	switch (state) {
-	case CPU_ONLINE:
+	if (cpu_up) {
 		if (affine_cpu != 0)
-			return NOTIFY_OK;
+			return;
 
 		for_each_online_cpu(i) {
 			if (i == 0)
@@ -390,10 +396,9 @@ __cds_cpu_hotplug_notify(struct notifier_block *block,
 			pref_cpu = i;
 			break;
 		}
-		break;
-	case CPU_DEAD:
+	} else {
 		if (cpu != affine_cpu)
-			return NOTIFY_OK;
+			return;
 
 		affine_cpu = 0;
 		for_each_online_cpu(i) {
@@ -405,44 +410,36 @@ __cds_cpu_hotplug_notify(struct notifier_block *block,
 	}
 
 	if (pref_cpu == 0)
-		return NOTIFY_OK;
+		return;
 
 	if (!cds_set_cpus_allowed_ptr(pSchedContext->ol_rx_thread, pref_cpu))
 		affine_cpu = pref_cpu;
-
-	return NOTIFY_OK;
 }
 
 /**
- * vos_cpu_hotplug_notify - cpu core on-off notification handler wrapper
- * @block:	notifier block
- * @state:	state of core
- * @hcpu:	target cpu core
- *
- * pre-registered core status change notify callback function
- * will handle only ONLINE, OFFLINE notification
- * based on cpu architecture, rx thread affinity will be different
- * wrapper function
+ * cds_cpu_hotplug_notify - cpu core up/down notification handler wrapper
+ * @cpu: CPU Id of the CPU generating the event
+ * @cpu_up: true if the CPU is online
  *
- * Return: 0 success
- *         1 fail
+ * Return: None
  */
-static int cds_cpu_hotplug_notify(struct notifier_block *block,
-				unsigned long state, void *hcpu)
+static void cds_cpu_hotplug_notify(uint32_t cpu, bool cpu_up)
 {
-	int ret;
-
 	cds_ssr_protect(__func__);
-	ret = __cds_cpu_hotplug_notify(block, state, hcpu);
+	__cds_cpu_hotplug_notify(cpu, cpu_up);
 	cds_ssr_unprotect(__func__);
+}
 
-	return ret;
+static void cds_cpu_online_cb(void *context, uint32_t cpu)
+{
+	cds_cpu_hotplug_notify(cpu, true);
 }
 
-static struct notifier_block cds_cpu_hotplug_notifier = {
-	.notifier_call = cds_cpu_hotplug_notify,
-};
-#endif
+static void cds_cpu_before_offline_cb(void *context, uint32_t cpu)
+{
+	cds_cpu_hotplug_notify(cpu, false);
+}
+#endif /* QCA_CONFIG_SMP */
 
 /**
  * cds_sched_open() - initialize the CDS Scheduler
@@ -496,8 +493,10 @@ QDF_STATUS cds_sched_open(void *p_cds_context,
 	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
 	if (cds_alloc_ol_rx_pkt_freeq(pSchedContext) != QDF_STATUS_SUCCESS)
 		goto pkt_freeqalloc_failure;
-	register_hotcpu_notifier(&cds_cpu_hotplug_notifier);
-	pSchedContext->cpu_hot_plug_notifier = &cds_cpu_hotplug_notifier;
+	qdf_cpuhp_register(&pSchedContext->cpuhp_event_handle,
+			   NULL,
+			   cds_cpu_online_cb,
+			   cds_cpu_before_offline_cb);
 	mutex_init(&pSchedContext->affinity_lock);
 	pSchedContext->high_throughput_required = false;
 #endif
@@ -532,7 +531,7 @@ OL_RX_THREAD_START_FAILURE:
 #endif
 
 #ifdef QCA_CONFIG_SMP
-	unregister_hotcpu_notifier(&cds_cpu_hotplug_notifier);
+	qdf_cpuhp_unregister(&pSchedContext->cpuhp_event_handle);
 	cds_free_ol_rx_pkt_freeq(gp_cds_sched_context);
 pkt_freeqalloc_failure:
 #endif
@@ -874,8 +873,7 @@ QDF_STATUS cds_sched_close(void)
 	gp_cds_sched_context->ol_rx_thread = NULL;
 	cds_drop_rxpkt_by_staid(gp_cds_sched_context, WLAN_MAX_STA_COUNT);
 	cds_free_ol_rx_pkt_freeq(gp_cds_sched_context);
-	unregister_hotcpu_notifier(&cds_cpu_hotplug_notifier);
-	gp_cds_sched_context->cpu_hot_plug_notifier = NULL;
+	qdf_cpuhp_unregister(&gp_cds_sched_context->cpuhp_event_handle);
 #endif
 	gp_cds_sched_context = NULL;
 	return QDF_STATUS_SUCCESS;