ソースを参照

qcacmn: Introduce hif_exec_context

hif_exec_context extends hif_ext_groups to support napi and tasklet models.
Some of the renames and enhancements have been done to support merging of
the execution context management code between the CE and DP contexts, as
well as supporting irq affinity for both napi & tasklet contexts.

Change-Id: I82c8abf2e906f027ec80faf7353a7685536bb79b
CRs-Fixed: 2051902
Houston Hoffman 8 年 前
コミット
def86a361b

+ 4 - 5
dp/wifi3.0/dp_main.c

@@ -514,11 +514,10 @@ static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
 
 		}
 
-
-		ret = hif_register_ext_group_int_handler(soc->hif_handle,
-				num_irq, irq_id_map,
-				dp_service_srngs,
-				&soc->intr_ctx[i]);
+		ret = hif_register_ext_group(soc->hif_handle,
+				num_irq, irq_id_map, dp_service_srngs,
+				&soc->intr_ctx[i], "dp_intr",
+				HIF_EXEC_NAPI_TYPE);
 
 		if (ret) {
 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,

+ 49 - 18
hif/inc/hif.h

@@ -179,11 +179,24 @@ struct CE_state;
 #endif
 
 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
-
 /* NOTE: "napi->scale" can be changed,
  * but this does not change the number of buckets
  */
 #define QCA_NAPI_NUM_BUCKETS 4
+/**
+ * qca_napi_stat - stats structure for execution contexts
+ * @napi_schedules - number of times the schedule function is called
+ * @napi_polls - number of times the execution context runs
+ * @napi_completes - number of times that the generating interrupt is reenabled
+ * @napi_workdone - cumulative of all work done reported by handler
+ * @cpu_corrected - incremented when execution context runs on a different core
+ *			than the one that its irq is affined to.
+ * @napi_budget_uses - histogram of work done per execution run
+ * @time_limit_reached - count of yields due to time limit thresholds
+ * @rxpkt_thresh_reached - count of yields due to a work limit
+ *
+ * needs to be renamed
+ */
 struct qca_napi_stat {
 	uint32_t napi_schedules;
 	uint32_t napi_polls;
@@ -195,6 +208,7 @@ struct qca_napi_stat {
 	uint32_t rxpkt_thresh_reached;
 };
 
+
 /**
  * per NAPI instance data structure
  * This data structure holds stuff per NAPI instance.
@@ -217,6 +231,16 @@ struct qca_napi_info {
 	qdf_spinlock_t lro_unloading_lock;
 };
 
+enum qca_napi_tput_state {
+	QCA_NAPI_TPUT_UNINITIALIZED,
+	QCA_NAPI_TPUT_LO,
+	QCA_NAPI_TPUT_HI
+};
+enum qca_napi_cpu_state {
+	QCA_NAPI_CPU_UNINITIALIZED,
+	QCA_NAPI_CPU_DOWN,
+	QCA_NAPI_CPU_UP };
+
 /**
  * struct qca_napi_cpu - an entry of the napi cpu table
  * @core_id:     physical core id of the core
@@ -226,6 +250,7 @@ struct qca_napi_info {
  * @max_freq:    maximum clock this core can be clocked at
  *               same for all cpus of the same core.
  * @napis:       bitmap of napi instances on this core
+ * @execs:       bitmap of execution contexts on this core
  * cluster_nxt:  chain to link cores within the same cluster
  *
  * This structure represents a single entry in the napi cpu
@@ -235,15 +260,6 @@ struct qca_napi_info {
  * notifier and when cpu affinity decisions are made (by throughput
  * detection), and deleted when the last napi instance is removed.
  */
-enum qca_napi_tput_state {
-	QCA_NAPI_TPUT_UNINITIALIZED,
-	QCA_NAPI_TPUT_LO,
-	QCA_NAPI_TPUT_HI
-};
-enum qca_napi_cpu_state {
-	QCA_NAPI_CPU_UNINITIALIZED,
-	QCA_NAPI_CPU_DOWN,
-	QCA_NAPI_CPU_UP };
 struct qca_napi_cpu {
 	enum qca_napi_cpu_state state;
 	int			core_id;
@@ -252,22 +268,32 @@ struct qca_napi_cpu {
 	cpumask_t		thread_mask;
 	unsigned int		max_freq;
 	uint32_t		napis;
+	uint32_t		execs;
 	int			cluster_nxt;  /* index, not pointer */
 };
 
 /**
- * NAPI data-structure common to all NAPI instances.
- *
- * A variable of this type will be stored in hif module context.
+ * struct qca_napi_data - collection of napi data for a single hif context
+ * @hif_softc: pointer to the hif context
+ * @lock: spinlock used in the event state machine
+ * @state: state variable used in the napi state machine
+ * @ce_map: bit map indicating which ce's have napis running
+ * @exec_map: bit map of instantiated exec contexts
+ * @napi_cpu: cpu info for irq affinity
+ * @lilcl_head:
+ * @bigcl_head:
+ * @napi_mode: irq affinity & clock voting mode
  */
 struct qca_napi_data {
-	qdf_spinlock_t           lock;
+	struct               hif_softc *hif_softc;
+	qdf_spinlock_t       lock;
 	uint32_t             state;
 
 	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
 	 * not used by clients (clients use an id returned by create)
 	 */
 	uint32_t             ce_map;
+	uint32_t             exec_map;
 	struct qca_napi_info *napis[CE_COUNT_MAX];
 	struct qca_napi_cpu  napi_cpu[NR_CPUS];
 	int                  lilcl_head, bigcl_head;
@@ -495,7 +521,6 @@ QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
 			uint32_t address, uint8_t *data, int nbytes);
 
 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
-typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
 
 /*
  * Set the FASTPATH_mode_on flag in sc, for use by data path
@@ -854,11 +879,17 @@ void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
 
 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
 
-uint32_t hif_register_ext_group_int_handler(struct hif_opaque_softc *hif_ctx,
-		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
-		void *context);
+enum hif_exec_type {
+	HIF_EXEC_NAPI_TYPE,
+	HIF_EXEC_TASKLET_TYPE,
+};
 
+typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
 uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
+uint32_t  hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
+		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
+		void *cb_ctx, const char *context_name,
+		enum hif_exec_type type);
 
 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
 				u_int8_t pipeid,

+ 10 - 12
hif/src/ce/ce_main.h

@@ -32,6 +32,7 @@
 #include "qdf_lock.h"
 #include "hif_main.h"
 #include "qdf_util.h"
+#include "hif_exec.h"
 
 #define CE_HTT_T2H_MSG 1
 #define CE_HTT_H2T_MSG 4
@@ -114,17 +115,14 @@ struct ce_tasklet_entry {
 	void *hif_ce_state;
 };
 
-struct hif_ext_group_entry {
-	uint32_t numirq;
-	uint32_t irq[HIF_MAX_GRP_IRQ];
-	uint32_t grp_id;
-	void *context;
-	ext_intr_handler handler;
-	struct tasklet_struct intr_tq;
-	bool configured;
-	bool inited;
-	void *hif_state;
-};
+static inline bool hif_dummy_grp_done(struct hif_exec_context *grp_entry, int
+				      work_done)
+{
+	return true;
+}
+
+extern struct hif_execution_ops tasklet_sched_ops;
+extern struct hif_execution_ops napi_sched_ops;
 
 struct ce_stats {
 	uint32_t ce_per_cpu[CE_COUNT_MAX][QDF_MAX_AVAILABLE_CPU];
@@ -134,7 +132,7 @@ struct HIF_CE_state {
 	struct hif_softc ol_sc;
 	bool started;
 	struct ce_tasklet_entry tasklets[CE_COUNT_MAX];
-	struct hif_ext_group_entry hif_ext_group[HIF_MAX_GROUP];
+	struct hif_exec_context *hif_ext_group[HIF_MAX_GROUP];
 	uint32_t hif_num_extgroup;
 	qdf_spinlock_t keep_awake_lock;
 	qdf_spinlock_t irq_reg_lock;

+ 5 - 3
hif/src/dispatcher/ahb_api.h

@@ -18,6 +18,7 @@
 
 #ifndef __AHB_API_H
 #define __AHB_API_H
+struct hif_exec_context;
 
 QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx,
 			 enum qdf_bus_type bus_type);
@@ -35,8 +36,8 @@ void hif_ahb_disable_bus(struct hif_softc *scn);
 int hif_ahb_bus_configure(struct hif_softc *scn);
 void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id);
 void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id);
-void hif_ahb_grp_irq_disable(struct hif_softc *scn, uint32_t grp_id);
-void hif_ahb_grp_irq_enable(struct hif_softc *scn, uint32_t grp_id);
+void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_grp);
+void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_grp);
 int hif_ahb_dump_registers(struct hif_softc *scn);
 
 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc);
@@ -46,6 +47,7 @@ int hif_ahb_enable_radio(struct hif_pci_softc *sc,
 		struct platform_device *pdev,
 		const struct platform_device_id *id);
 int hif_ahb_configure_irq(struct hif_pci_softc *sc);
-int hif_ahb_configure_grp_irq(struct hif_softc *scn);
+int hif_ahb_configure_grp_irq(struct hif_softc *scn,
+			      struct hif_exec_context *hif_ext_grp);
 
 #endif

+ 3 - 2
hif/src/dispatcher/dummy.c

@@ -265,7 +265,8 @@ void hif_dummy_grp_irq_disable(struct hif_softc *hif_sc, uint32_t grp_id)
  *
  * Return: none
  */
-int hif_dummy_grp_irq_configure(struct hif_softc *hif_sc)
+int hif_dummy_grp_irq_configure(struct hif_softc *hif_sc,
+				struct hif_exec_context *exec)
 {
     return 0;
 }
@@ -274,7 +275,7 @@ int hif_dummy_grp_irq_configure(struct hif_softc *hif_sc)
  * hif_dummy_dump_registers - dummy call
  * hif_sc: hif context
  *
- * Return: 0 for sucess
+ * Return: 0 for success
  */
 int hif_dummy_dump_registers(struct hif_softc *hif_sc)
 {

+ 3 - 1
hif/src/dispatcher/dummy.h

@@ -25,6 +25,7 @@
  * to the Linux Foundation.
  */
 struct hif_softc;
+struct hif_exec_context;
 
 void hif_dummy_bus_prevent_linkdown(struct hif_softc *scn, bool flag);
 void hif_dummy_reset_soc(struct hif_softc *scn);
@@ -49,7 +50,8 @@ void hif_dummy_irq_enable(struct hif_softc *hif_sc, int irq_id);
 void hif_dummy_irq_disable(struct hif_softc *hif_sc, int irq_id);
 void hif_dummy_grp_irq_enable(struct hif_softc *hif_sc, uint32_t grp_id);
 void hif_dummy_grp_irq_disable(struct hif_softc *hif_sc, uint32_t grp_id);
-int hif_dummy_grp_irq_configure(struct hif_softc *hif_sc);
+int hif_dummy_grp_irq_configure(struct hif_softc *hif_sc,
+				struct hif_exec_context *exec);
 int hif_dummy_dump_registers(struct hif_softc *hif_sc);
 void hif_dummy_dump_target_memory(struct hif_softc *hif_sc, void *ramdump_base,
 				  uint32_t address, uint32_t size);

+ 3 - 14
hif/src/dispatcher/multibus.c

@@ -64,8 +64,6 @@ static void hif_intialize_default_ops(struct hif_softc *hif_sc)
 	bus_ops->hif_bus_resume_noirq = &hif_dummy_bus_resume_noirq;
 	bus_ops->hif_bus_early_suspend = &hif_dummy_bus_suspend;
 	bus_ops->hif_bus_late_resume = &hif_dummy_bus_resume;
-	bus_ops->hif_grp_irq_disable = &hif_dummy_grp_irq_disable;
-	bus_ops->hif_grp_irq_enable = &hif_dummy_grp_irq_enable;
 	bus_ops->hif_map_ce_to_irq = &hif_dummy_map_ce_to_irq;
 	bus_ops->hif_grp_irq_configure = &hif_dummy_grp_irq_configure;
 }
@@ -321,24 +319,15 @@ void hif_irq_enable(struct hif_softc *hif_sc, int irq_id)
 	hif_sc->bus_ops.hif_irq_enable(hif_sc, irq_id);
 }
 
-void hif_grp_irq_enable(struct hif_softc *hif_sc, uint32_t grp_id)
-{
-	hif_sc->bus_ops.hif_grp_irq_enable(hif_sc, grp_id);
-}
-
 void hif_irq_disable(struct hif_softc *hif_sc, int irq_id)
 {
 	hif_sc->bus_ops.hif_irq_disable(hif_sc, irq_id);
 }
 
-void hif_grp_irq_disable(struct hif_softc *hif_sc, uint32_t grp_id)
-{
-	hif_sc->bus_ops.hif_grp_irq_disable(hif_sc, grp_id);
-}
-
-int hif_grp_irq_configure(struct hif_softc *hif_sc)
+int hif_grp_irq_configure(struct hif_softc *hif_sc,
+			  struct hif_exec_context *hif_exec)
 {
-	return hif_sc->bus_ops.hif_grp_irq_configure(hif_sc);
+	return hif_sc->bus_ops.hif_grp_irq_configure(hif_sc, hif_exec);
 }
 
 int hif_dump_registers(struct hif_opaque_softc *hif_hdl)

+ 3 - 3
hif/src/dispatcher/multibus.h

@@ -33,6 +33,7 @@
 #include "hif_debug.h"
 
 struct hif_softc;
+struct hif_exec_context;
 
 struct hif_bus_ops {
 	QDF_STATUS (*hif_bus_open)(struct hif_softc *hif_sc,
@@ -65,10 +66,9 @@ struct hif_bus_ops {
 	void (*hif_stop)(struct hif_softc *hif_sc);
 	void (*hif_cancel_deferred_target_sleep)(struct hif_softc *hif_sc);
 	void (*hif_irq_disable)(struct hif_softc *hif_sc, int ce_id);
-	void (*hif_grp_irq_disable)(struct hif_softc *hif_sc, uint32_t grp_id);
 	void (*hif_irq_enable)(struct hif_softc *hif_sc, int ce_id);
-	void (*hif_grp_irq_enable)(struct hif_softc *hif_sc, uint32_t grp_id);
-	int (*hif_grp_irq_configure)(struct hif_softc *hif_sc);
+	int (*hif_grp_irq_configure)(struct hif_softc *hif_sc,
+				     struct hif_exec_context *exec);
 	int (*hif_dump_registers)(struct hif_softc *hif_sc);
 	void (*hif_dump_target_memory)(struct hif_softc *hif_sc,
 				       void *ramdump_base,

+ 0 - 2
hif/src/dispatcher/multibus_ahb.c

@@ -66,8 +66,6 @@ QDF_STATUS hif_initialize_ahb_ops(struct hif_bus_ops *bus_ops)
 		&hif_dummy_enable_power_management;
 	bus_ops->hif_disable_power_management =
 		&hif_dummy_disable_power_management;
-	bus_ops->hif_grp_irq_disable = &hif_ahb_grp_irq_disable;
-	bus_ops->hif_grp_irq_enable = &hif_ahb_grp_irq_enable;
 	bus_ops->hif_grp_irq_configure = &hif_ahb_configure_grp_irq;
 
 	return QDF_STATUS_SUCCESS;

+ 0 - 3
hif/src/dispatcher/multibus_pci.c

@@ -87,9 +87,6 @@ QDF_STATUS hif_initialize_pci_ops(struct hif_softc *hif_sc)
 		&hif_pci_display_stats;
 	bus_ops->hif_clear_stats =
 		&hif_pci_clear_stats;
-	bus_ops->hif_grp_irq_disable = &hif_dummy_grp_irq_disable;
-	bus_ops->hif_grp_irq_enable = &hif_dummy_grp_irq_enable;
-	bus_ops->hif_grp_irq_configure = &hif_dummy_grp_irq_configure;
 
 	/* default to legacy mapping handler; override as needed */
 	bus_ops->hif_map_ce_to_irq = &hif_pci_legacy_map_ce_to_irq;

+ 0 - 3
hif/src/dispatcher/multibus_sdio.c

@@ -64,9 +64,6 @@ QDF_STATUS hif_initialize_sdio_ops(struct hif_softc *hif_sc)
 		&hif_dummy_enable_power_management;
 	bus_ops->hif_disable_power_management =
 		&hif_dummy_disable_power_management;
-	bus_ops->hif_grp_irq_disable = &hif_dummy_grp_irq_disable;
-	bus_ops->hif_grp_irq_enable = &hif_dummy_grp_irq_enable;
-	bus_ops->hif_grp_irq_configure = &hif_dummy_grp_irq_configure;
 
 	return QDF_STATUS_SUCCESS;
 }

+ 0 - 3
hif/src/dispatcher/multibus_snoc.c

@@ -81,10 +81,7 @@ QDF_STATUS hif_initialize_snoc_ops(struct hif_bus_ops *bus_ops)
 		&hif_snoc_display_stats;
 	bus_ops->hif_clear_stats =
 		&hif_snoc_clear_stats;
-	bus_ops->hif_grp_irq_disable = &hif_dummy_grp_irq_disable;
-	bus_ops->hif_grp_irq_enable = &hif_dummy_grp_irq_enable;
 	bus_ops->hif_map_ce_to_irq = &hif_snoc_map_ce_to_irq;
-	bus_ops->hif_grp_irq_configure = &hif_dummy_grp_irq_configure;
 
 	return QDF_STATUS_SUCCESS;
 }

+ 0 - 1
hif/src/dispatcher/multibus_usb.c

@@ -64,7 +64,6 @@ QDF_STATUS hif_initialize_usb_ops(struct hif_bus_ops *bus_ops)
 			&hif_dummy_disable_power_management;
 	bus_ops->hif_set_bundle_mode = hif_usb_set_bundle_mode;
 	bus_ops->hif_bus_reset_resume = hif_usb_bus_reset_resume;
-	bus_ops->hif_grp_irq_configure = &hif_dummy_grp_irq_configure;
 
 	return QDF_STATUS_SUCCESS;
 }

+ 360 - 0
hif/src/hif_exec.c

@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <hif_exec.h>
+#include <ce_main.h>
+
+
+static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
+{
+	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
+
+	tasklet_schedule(&t_ctx->tasklet);
+}
+
+/**
+ * hif_exec_tasklet_fn() - bottom half for a tasklet-type exec context
+ * @data: context (a struct hif_exec_context cast to unsigned long)
+ *
+ * return: void
+ */
+static void hif_exec_tasklet_fn(unsigned long data)
+{
+	struct hif_exec_context *hif_ext_group =
+			(struct hif_exec_context *)data;
+	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
+	unsigned int work_done;
+
+	work_done =
+		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET);
+
+	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
+		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
+		hif_ext_group->irq_enable(hif_ext_group);
+	} else {
+		hif_exec_tasklet_schedule(hif_ext_group);
+	}
+}
+
+/**
+ * hif_exec_poll() - napi poll function for a napi-type exec context
+ * @napi: pointer to the embedded napi struct
+ * @budget: maximum amount of work the poll function may do
+ *
+ * return: amount of work done (capped at budget - 1 when completing)
+ */
+static int hif_exec_poll(struct napi_struct *napi, int budget)
+{
+	struct hif_napi_exec_context *exec_ctx =
+		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
+	struct hif_exec_context *hif_ext_group = &exec_ctx->exec_ctx;
+	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
+	int work_done;
+
+	work_done = hif_ext_group->handler(hif_ext_group->context, budget);
+
+	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
+		if (work_done >= budget)
+			work_done = budget - 1;
+
+		napi_complete(napi);
+		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
+		hif_ext_group->irq_enable(hif_ext_group);
+	} else {
+		/* if the ext_group supports time based yield, claim full work
+		 * done anyways */
+		work_done = budget;
+	}
+
+	return work_done;
+}
+
+/**
+ * hif_exec_napi_schedule() - schedule the napi exec instance
+ * @ctx: a hif_exec_context known to be of napi type
+ */
+static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
+{
+	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
+
+	napi_schedule(&n_ctx->napi);
+}
+
+/**
+ * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
+ * @ctx: a hif_exec_context known to be of napi type
+ */
+static void hif_exec_napi_kill(struct hif_exec_context *ctx)
+{
+	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
+
+	if (ctx->inited) {
+		napi_disable(&n_ctx->napi);
+		ctx->inited = 0;
+	}
+}
+
+struct hif_execution_ops napi_sched_ops = {
+	.schedule = &hif_exec_napi_schedule,
+	.kill = &hif_exec_napi_kill,
+};
+
+#ifdef FEATURE_NAPI
+/**
+ * hif_exec_napi_create() - allocate and initialize a napi exec context
+ */
+static struct hif_exec_context *hif_exec_napi_create(void)
+{
+	struct hif_napi_exec_context *ctx;
+
+	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
+	if (ctx == NULL)
+		return NULL;
+
+	ctx->exec_ctx.sched_ops = &napi_sched_ops;
+	ctx->exec_ctx.inited = true;
+	init_dummy_netdev(&(ctx->netdev));
+	netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
+		       QCA_NAPI_BUDGET);
+	napi_enable(&ctx->napi);
+
+	return &ctx->exec_ctx;
+}
+#else
+static struct hif_exec_context *hif_exec_napi_create(void)
+{
+	HIF_WARN("%s: FEATURE_NAPI not defined, making tasklet");
+	return hif_exec_tasklet_create();
+}
+#endif
+
+
+/**
+ * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
+ * @ctx: a hif_exec_context known to be of tasklet type
+ */
+static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
+{
+	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
+
+	if (ctx->inited) {
+		tasklet_disable(&t_ctx->tasklet);
+		tasklet_kill(&t_ctx->tasklet);
+	}
+	ctx->inited = false;
+}
+
+struct hif_execution_ops tasklet_sched_ops = {
+	.schedule = &hif_exec_tasklet_schedule,
+	.kill = &hif_exec_tasklet_kill,
+};
+
+/**
+ * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
+ */
+static struct hif_exec_context *hif_exec_tasklet_create(void)
+{
+	struct hif_tasklet_exec_context *ctx;
+
+	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
+	if (ctx == NULL)
+		return NULL;
+
+	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
+	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
+		     (unsigned long)ctx);
+
+	ctx->exec_ctx.inited = true;
+
+	return &ctx->exec_ctx;
+}
+
+/**
+ * hif_exec_get_ctx() - retrieve an exec context based on an id
+ * @softc: the hif context owning the exec context
+ * @id: the id of the exec context
+ *
+ * mostly added to make it easier to rename or move the context array
+ */
+struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
+					  uint8_t id)
+{
+	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
+
+	if (id < hif_state->hif_num_extgroup)
+		return hif_state->hif_ext_group[id];
+
+	return NULL;
+}
+
+/**
+ * hif_configure_ext_group_interrupts() - API to configure external group
+ * interrupts
+ * @hif_ctx : HIF Context
+ *
+ * Return: status
+ */
+uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
+{
+	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
+	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
+	struct hif_exec_context *hif_ext_group;
+	int i, status;
+
+	if (scn->ext_grp_irq_configured) {
+		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
+		hif_ext_group = hif_state->hif_ext_group[i];
+		status = 0;
+		if (hif_ext_group->configured &&
+		    hif_ext_group->irq_requested == false)
+			status = hif_grp_irq_configure(scn, hif_ext_group);
+		if (status != 0)
+			HIF_ERROR("%s: failed for group %d", __func__, i);
+	}
+
+	scn->ext_grp_irq_configured = true;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * hif_ext_group_interrupt_handler() - handler for related interrupts
+ * @irq: irq number of the interrupt
+ * @context: the associated hif_exec_group context
+ *
+ * This callback function takes care of disabling the associated interrupts
+ * and scheduling the expected bottom half for the exec_context.
+ * This callback function also helps keep track of the count of running
+ * contexts.
+ */
+irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
+{
+	struct hif_exec_context *hif_ext_group = context;
+	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
+
+
+	hif_ext_group->irq_disable(hif_ext_group);
+	qdf_atomic_inc(&scn->active_grp_tasklet_cnt);
+
+	hif_ext_group->sched_ops->schedule(hif_ext_group);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * hif_exec_kill() - kill all registered exec contexts and reset the count
+ * @hif_ctx: opaque hif context
+ *
+ * return: void
+ */
+void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
+{
+	int i;
+	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
+
+	for (i = 0; i < hif_state->hif_num_extgroup; i++)
+		hif_state->hif_ext_group[i]->sched_ops->kill(
+			hif_state->hif_ext_group[i]);
+
+	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
+}
+
+/**
+ * hif_register_ext_group() - API to register external group
+ * interrupt handler.
+ * @hif_ctx : HIF Context
+ * @numirq: number of irq's in the group
+ * @irq: array of irq values
+ * @handler: callback interrupt handler function
+ * @cb_ctx: context to passed in callback
+ * @type: napi vs tasklet
+ *
+ * Return: status
+ */
+uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
+		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
+		void *cb_ctx, const char *context_name, enum hif_exec_type type)
+{
+	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
+	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+	struct hif_exec_context *hif_ext_group;
+
+	if (scn->ext_grp_irq_configured) {
+		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
+		HIF_ERROR("%s Max groups reached\n", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (numirq >= HIF_MAX_GRP_IRQ) {
+		HIF_ERROR("%s invalid numirq\n", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	hif_ext_group = hif_exec_create(type);
+	if (hif_ext_group == NULL)
+		return QDF_STATUS_E_FAILURE;
+
+	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
+		hif_ext_group;
+
+	hif_ext_group->numirq = numirq;
+	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
+	hif_ext_group->context = cb_ctx;
+	hif_ext_group->handler = handler;
+	hif_ext_group->configured = true;
+	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
+	hif_ext_group->hif = hif_ctx;
+	hif_ext_group->context_name = context_name;
+
+	hif_state->hif_num_extgroup++;
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * hif_exec_create() - create an execution context
+ * @type: the type of execution context to create
+ */
+struct hif_exec_context *hif_exec_create(enum hif_exec_type type)
+{
+	switch (type) {
+	case HIF_EXEC_NAPI_TYPE:
+		return hif_exec_napi_create();
+
+	case HIF_EXEC_TASKLET_TYPE:
+		return hif_exec_tasklet_create();
+	default:
+		return NULL;
+	}
+}
+
+/**
+ * hif_exec_destroy() - free the hif_exec context
+ * @ctx: context to free
+ *
+ * please kill the context before freeing it to avoid a use after free.
+ */
+void hif_exec_destroy(struct hif_exec_context *ctx)
+{
+	qdf_mem_free(ctx);
+}

+ 120 - 0
hif/src/hif_exec.h

@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __HIF_EXEC_H__
+#define __HIF_EXEC_H__
+
+#include <hif.h>
+
+
+struct hif_exec_context;
+
+struct hif_execution_ops {
+	char *context_type;
+	void (*schedule)(struct hif_exec_context *);
+	void (*reschedule)(struct hif_exec_context *);
+	void (*kill)(struct hif_exec_context *);
+};
+
+/**
+ * hif_exec_context: only ever allocated as a subtype eg.
+ *					hif_tasklet_exec_context
+ *
+ * @context: context for the handler function to use.
+ * @context_name: a pointer to a const string for debugging.
+ *		this should help whenever there could be ambiguity
+ *		in what type of context the void* context points to
+ * @irq: irq handles corresponding to hw block
+ * @os_irq: irq handles for irq affinity
+ * @cpu: the cpu this context should be affined to
+ * @work_complete: Function call called when leaving the execution context to
+ *	determine if this context should reschedule or wait for an interrupt.
+ *	This function may be used as a hook for post processing.
+ *
+ * @irq_disable: called before scheduling the context.
+ * @irq_enable: called when the context leaves polling mode
+ */
+struct hif_exec_context {
+	struct hif_execution_ops *sched_ops;
+	struct hif_opaque_softc *hif;
+	uint32_t numirq;
+	uint32_t irq[HIF_MAX_GRP_IRQ];
+	uint32_t os_irq[HIF_MAX_GRP_IRQ];
+	uint32_t grp_id;
+	const char *context_name;
+	void *context;
+	ext_intr_handler handler;
+
+	bool (*work_complete)(struct hif_exec_context *, int work_done);
+	void (*irq_enable)(struct hif_exec_context *);
+	void (*irq_disable)(struct hif_exec_context *);
+
+	uint8_t cpu;
+	struct qca_napi_stat stats[NR_CPUS];
+	bool inited;
+	bool configured;
+	bool irq_requested;
+};
+
+/**
+ * struct hif_tasklet_exec_context - exec_context for tasklets
+ * @exec_ctx: inherited data type
+ * @tasklet: tasklet structure for scheduling
+ */
+struct hif_tasklet_exec_context {
+	struct hif_exec_context exec_ctx;
+	struct tasklet_struct tasklet;
+};
+
+/**
+ * struct hif_napi_exec_context - exec_context for NAPI
+ * @exec_ctx: inherited data type
+ * @netdev: dummy net device associated with the napi context
+ * @napi: napi structure used in scheduling
+ */
+struct hif_napi_exec_context {
+	struct hif_exec_context exec_ctx;
+	struct net_device    netdev; /* dummy net_dev */
+	struct napi_struct   napi;
+};
+
+static inline struct hif_napi_exec_context*
+	hif_exec_get_napi(struct hif_exec_context *ctx)
+{
+	return (struct hif_napi_exec_context *) ctx;
+}
+
+static inline struct hif_tasklet_exec_context*
+	hif_exec_get_tasklet(struct hif_exec_context *ctx)
+{
+	return (struct hif_tasklet_exec_context *) ctx;
+}
+
+struct hif_exec_context *hif_exec_create(enum hif_exec_type type);
+void hif_exec_destroy(struct hif_exec_context *ctx);
+
+int hif_grp_irq_configure(struct hif_softc *scn,
+			  struct hif_exec_context *hif_exec);
+irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context);
+
+struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *hif,
+					  uint8_t id);
+void hif_exec_kill(struct hif_opaque_softc *scn);
+
+#endif
+

+ 0 - 3
hif/src/hif_io32.h

@@ -104,8 +104,5 @@ void hif_target_write_checked(struct hif_softc *scn, uint32_t offset,
 
 void hif_irq_enable(struct hif_softc *scn, int irq_id);
 void hif_irq_disable(struct hif_softc *scn, int irq_id);
-void hif_grp_irq_enable(struct hif_softc *scn, uint32_t grp_id);
-void hif_grp_irq_disable(struct hif_softc *scn, uint32_t grp_id);
-int hif_grp_irq_configure(struct hif_softc *scn);
 
 #endif /* __HIF_IO32_H__ */

+ 0 - 112
hif/src/hif_main.c

@@ -1165,115 +1165,3 @@ void hif_ramdump_handler(struct hif_opaque_softc *scn)
 }
 #endif
 
-/**
- * hif_register_ext_group_int_handler() - API to register external group
- * interrupt handler.
- * @hif_ctx : HIF Context
- * @numirq: number of irq's in the group
- * @irq: array of irq values
- * @ext_intr_handler: callback interrupt handler function
- * @context: context to passed in callback
- *
- * Return: status
- */
-uint32_t hif_register_ext_group_int_handler(struct hif_opaque_softc *hif_ctx,
-		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
-		void *context)
-{
-	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
-	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
-	struct hif_ext_group_entry *hif_ext_group;
-
-	if (scn->ext_grp_irq_configured) {
-		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
-		return QDF_STATUS_E_FAILURE;
-	}
-
-	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
-		HIF_ERROR("%s Max groups reached\n", __func__);
-		return QDF_STATUS_E_FAILURE;
-	}
-
-	if (numirq >= HIF_MAX_GRP_IRQ) {
-		HIF_ERROR("%s invalid numirq\n", __func__);
-		return QDF_STATUS_E_FAILURE;
-	}
-
-	hif_ext_group = &hif_state->hif_ext_group[hif_state->hif_num_extgroup];
-
-	hif_ext_group->numirq = numirq;
-	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
-	hif_ext_group->context = context;
-	hif_ext_group->handler = handler;
-	hif_ext_group->configured = true;
-	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
-	hif_ext_group->hif_state = hif_state;
-
-	hif_state->hif_num_extgroup++;
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * hif_configure_ext_group_interrupts() - API to configure external group
- * interrpts
- * @hif_ctx : HIF Context
- *
- * Return: status
- */
-uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
-{
-	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
-
-	if (scn->ext_grp_irq_configured) {
-		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
-		return QDF_STATUS_E_FAILURE;
-	}
-
-	hif_grp_irq_configure(scn);
-	scn->ext_grp_irq_configured = true;
-
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * hif_ext_grp_tasklet() - grp tasklet
- * data: context
- *
- * return: void
- */
-void hif_ext_grp_tasklet(unsigned long data)
-{
-	struct hif_ext_group_entry *hif_ext_group =
-			(struct hif_ext_group_entry *)data;
-	struct HIF_CE_state *hif_state = hif_ext_group->hif_state;
-	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
-
-	if (hif_ext_group->grp_id < HIF_MAX_GROUP) {
-		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET);
-		hif_grp_irq_enable(scn, hif_ext_group->grp_id);
-	} else {
-		HIF_ERROR("%s: ERROR - invalid grp_id = %d",
-		       __func__, hif_ext_group->grp_id);
-	}
-
-	qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
-}
-
-/**
- * hif_grp_tasklet_kill() - grp tasklet kill
- * scn: hif_softc
- *
- * return: void
- */
-void hif_grp_tasklet_kill(struct hif_softc *scn)
-{
-	int i;
-	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
-
-	for (i = 0; i < HIF_MAX_GROUP; i++)
-		if (hif_state->hif_ext_group[i].inited) {
-			tasklet_kill(&hif_state->hif_ext_group[i].intr_tq);
-			hif_state->hif_ext_group[i].inited = false;
-		}
-	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
-}

+ 0 - 3
hif/src/hif_main.h

@@ -247,7 +247,4 @@ void hif_ramdump_handler(struct hif_opaque_softc *scn);
 static inline void hif_usb_get_hw_info(struct hif_softc *scn) {}
 static inline void hif_ramdump_handler(struct hif_opaque_softc *scn) {}
 #endif
-void hif_ext_grp_tasklet(unsigned long data);
-void hif_grp_tasklet_kill(struct hif_softc *scn);
-
 #endif /* __HIF_MAIN_H__ */

+ 2 - 1
hif/src/pcie/if_pci.c

@@ -2392,6 +2392,7 @@ irq_handled:
 
 }
 
+/* deprecated */
 static int hif_configure_msi(struct hif_pci_softc *sc)
 {
 	int ret = 0;
@@ -3197,11 +3198,11 @@ void hif_pci_disable_isr(struct hif_softc *scn)
 {
 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
 
+	hif_exec_kill(&scn->osc);
 	hif_nointrs(scn);
 	hif_free_msi_ctx(scn);
 	/* Cancel the pending tasklet */
 	ce_tasklet_kill(scn);
-	hif_grp_tasklet_kill(scn);
 	tasklet_kill(&sc->intr_tq);
 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);

+ 35 - 69
hif/src/snoc/if_ahb.c

@@ -113,9 +113,9 @@ void hif_ahb_disable_isr(struct hif_softc *scn)
 {
 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
 
+	hif_exec_kill(&scn->osc);
 	hif_nointrs(scn);
 	ce_tasklet_kill(scn);
-	hif_grp_tasklet_kill(scn);
 	tasklet_kill(&sc->intr_tq);
 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
@@ -273,45 +273,39 @@ end:
 	return ret;
 }
 
-int hif_ahb_configure_grp_irq(struct hif_softc *scn)
+int hif_ahb_configure_grp_irq(struct hif_softc *scn,
+			      struct hif_exec_context *hif_ext_group)
 {
 	int ret = 0;
 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
-	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
-	struct hif_ext_group_entry *hif_ext_group;
 	int irq = 0;
-	int i, j;
+	const char *irq_name;
+	int j;
 
 	/* configure external interrupts */
-	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
+	hif_ext_group->irq_enable = &hif_ahb_exec_grp_irq_enable;
+	hif_ext_group->irq_disable = &hif_ahb_exec_grp_irq_disable;
+	hif_ext_group->work_complete = &hif_dummy_grp_done;
 
-		hif_ext_group = &hif_state->hif_ext_group[i];
-		if (hif_ext_group->configured) {
+	hif_ext_group->irq_requested = true;
 
-			tasklet_init(&hif_ext_group->intr_tq,
-					hif_ext_grp_tasklet,
-					(unsigned long)hif_ext_group);
-			hif_ext_group->inited = true;
+	for (j = 0; j < hif_ext_group->numirq; j++) {
+		irq_name = ic_irqname[hif_ext_group->irq[j]];
+		irq = platform_get_irq_byname(pdev, irq_name);
 
-			for (j = 0; j < hif_ext_group->numirq; j++) {
-				irq = platform_get_irq_byname(pdev,
-					ic_irqname[hif_ext_group->irq[j]]);
-
-				ic_irqnum[hif_ext_group->irq[j]] = irq;
-				ret = request_irq(irq,
-					hif_ext_group_ahb_interrupt_handler,
-						IRQF_TRIGGER_RISING,
-						ic_irqname[hif_ext_group->irq[j]],
-						hif_ext_group);
-				if (ret) {
-					dev_err(&pdev->dev,
-						"ath_request_irq failed\n");
-					ret = -1;
-					goto end;
-				}
-			}
+		ic_irqnum[hif_ext_group->irq[j]] = irq;
+		ret = request_irq(irq, hif_ext_group_interrupt_handler,
+				  IRQF_TRIGGER_RISING,
+				  ic_irqname[hif_ext_group->irq[j]],
+				  hif_ext_group);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"ath_request_irq failed\n");
+			ret = -1;
+			goto end;
 		}
+		hif_ext_group->os_irq[j] = irq;
 	}
 
 end:
@@ -321,16 +315,16 @@ end:
 void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn)
 {
 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
-	struct hif_ext_group_entry *hif_ext_group;
+	struct hif_exec_context *hif_ext_group;
 	int i, j;
 
 	/* configure external interrupts */
 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
-		hif_ext_group = &hif_state->hif_ext_group[i];
-		if (hif_ext_group->inited == true) {
-			hif_ext_group->inited = false;
+		hif_ext_group = hif_state->hif_ext_group[i];
+		if (hif_ext_group->irq_requested == true) {
+			hif_ext_group->irq_requested = false;
 			for (j = 0; j < hif_ext_group->numirq; j++) {
-				free_irq(ic_irqnum[hif_ext_group->irq[j]],
+				free_irq(hif_ext_group->os_irq[j],
 						hif_ext_group);
 			}
 		}
@@ -343,27 +337,6 @@ irqreturn_t hif_ahb_interrupt_handler(int irq, void *context)
 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
 }
 
-irqreturn_t hif_ext_group_ahb_interrupt_handler(int irq, void *context)
-{
-	struct hif_ext_group_entry *hif_ext_group = context;
-	struct HIF_CE_state *hif_state = hif_ext_group->hif_state;
-	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
-	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
-	uint32_t grp_id = hif_ext_group->grp_id;
-
-	hif_grp_irq_disable(scn, grp_id);
-
-	qdf_atomic_inc(&scn->active_grp_tasklet_cnt);
-
-	if (hif_ext_napi_enabled(hif_hdl, grp_id)) {
-		hif_napi_schedule_grp(hif_hdl, grp_id);
-	} else {
-		tasklet_schedule(&hif_ext_group->intr_tq);
-	}
-
-	return IRQ_HANDLED;
-}
-
 /**
  * hif_target_sync() : ensure the target is ready
  * @scn: hif control structure
@@ -710,28 +683,21 @@ void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id)
 	}
 }
 
-void hif_ahb_grp_irq_disable(struct hif_softc *scn, uint32_t grp_id)
+void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
 {
-	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
-	struct hif_ext_group_entry *hif_ext_group;
-	uint32_t i;
-
-	hif_ext_group = &hif_state->hif_ext_group[grp_id];
+	int i;
 
 	for (i = 0; i < hif_ext_group->numirq; i++) {
-		disable_irq_nosync(ic_irqnum[hif_ext_group->irq[i]]);
+		disable_irq_nosync(hif_ext_group->os_irq[i]);
 	}
 }
 
-void hif_ahb_grp_irq_enable(struct hif_softc *scn, uint32_t grp_id)
+void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
 {
-	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
-	struct hif_ext_group_entry *hif_ext_group;
-	uint32_t i;
-
-	hif_ext_group = &hif_state->hif_ext_group[grp_id];
+	int i;
 
 	for (i = 0; i < hif_ext_group->numirq; i++) {
-		enable_irq(ic_irqnum[hif_ext_group->irq[i]]);
+		enable_irq(hif_ext_group->os_irq[i]);
 	}
 }
+

+ 1 - 2
hif/src/snoc/if_ahb.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -47,7 +47,6 @@
 #define ATH_AHB_RESET_WAIT_MAX 10 /* Ms */
 
 irqreturn_t hif_ahb_interrupt_handler(int irq, void *context);
-irqreturn_t hif_ext_group_ahb_interrupt_handler(int irq, void *context);
 
 #endif
 

+ 1 - 1
hif/src/snoc/if_snoc.c

@@ -56,9 +56,9 @@
  */
 void hif_snoc_disable_isr(struct hif_softc *scn)
 {
+	hif_exec_kill(&scn->osc);
 	hif_nointrs(scn);
 	ce_tasklet_kill(scn);
-	hif_grp_tasklet_kill(scn);
 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
 }