Browse Source

msm: gsi: add support for 2 new MSI interrupts

Add support for 2 new separate MSI interrupts
to pin rmnet_ll and rmnet_ctl processing to separate
CPUs.

Change-Id: I83977081a72d734622525732a97f8563fb530ade
Signed-off-by: Michael Adisumarta <[email protected]>
Michael Adisumarta 3 years ago
parent
commit
db86553f4e

+ 273 - 40
drivers/platform/msm/gsi/gsi.c

@@ -11,6 +11,8 @@
 #include <linux/msm_gsi.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
+#include <linux/msi.h>
+#include <linux/smp.h>
 #include "gsi.h"
 #include "gsi_emulation.h"
 #include "gsihal.h"
@@ -1112,23 +1114,25 @@ static irqreturn_t gsi_msi_isr(int irq, void *ctxt)
 	unsigned long flags;
 	unsigned long cntr;
 	bool empty;
+	uint8_t evt;
+	unsigned long msi;
 	struct gsi_evt_ctx *evt_ctxt;
-	void __iomem *msi_clear_add;
-	void __iomem *msi_add;
 
-	evt_ctxt = (struct gsi_evt_ctx *)(ctxt);
+	/* Determine which event channel to handle */
+	for (msi = 0; msi < gsi_ctx->msi.num; msi++) {
+		if (gsi_ctx->msi.irq[msi] == irq)
+			break;
+	}
+
+	/* Guard against a spurious IRQ that matches no registered MSI;
+	 * indexing evt[] with msi == msi.num would read out of bounds.
+	 */
+	if (msi >= gsi_ctx->msi.num) {
+		GSIERR("no MSI found for irq %d\n", irq);
+		return IRQ_NONE;
+	}
+
+	evt = gsi_ctx->msi.evt[msi];
+	evt_ctxt = &gsi_ctx->evtr[evt];
 
 	if (evt_ctxt->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
 		GSIERR("Unexpected irq intf %d\n",
 			evt_ctxt->props.intf);
 		GSI_ASSERT();
 	}
-	/* Clear IRQ by writing irq number to the MSI clear address */
-	msi_clear_add = (void __iomem *)evt_ctxt->props.msi_clear_addr;
-	iowrite32(evt_ctxt->props.intvec, msi_clear_add);
-	/* Writing zero to MSI address as well */
-	msi_add = (void __iomem *)evt_ctxt->props.msi_addr_iore_mapped;
-	iowrite32(0, msi_add);
+
 	/* Clearing IEOB irq if there are any generated for MSI channel */
 	gsihal_write_reg_nk(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_k, ee,
 		gsihal_get_ch_reg_idx(evt_ctxt->id),
@@ -1309,9 +1313,123 @@ int gsi_unmap_base(void)
 }
 EXPORT_SYMBOL(gsi_unmap_base);
 
+static void __gsi_msi_write_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+	u16 msi = 0;
+
+	if (IS_ERR_OR_NULL(desc) || IS_ERR_OR_NULL(msg) || IS_ERR_OR_NULL(gsi_ctx))
+		BUG();
+
+	msi = desc->platform.msi_index;
+
+	/* MSI should be valid and unallocated */
+	if ((msi >= gsi_ctx->msi.num) || (test_bit(msi, gsi_ctx->msi.allocated)))
+		BUG();
+
+	/* Save the message for later use */
+	memcpy(&gsi_ctx->msi.msg[msi], msg, sizeof(*msg));
+
+	dev_notice(gsi_ctx->dev,
+		"saved msi %u msg data %u addr 0x%08x%08x\n", msi,
+		msg->data, msg->address_hi, msg->address_lo);
+
+	/* Single MSI control is used. So MSI address will be same. */
+	if (!gsi_ctx->msi_addr_set) {
+		gsi_ctx->msi_addr = gsi_ctx->msi.msg[msi].address_hi;
+		gsi_ctx->msi_addr = (gsi_ctx->msi_addr << 32) |
+			gsi_ctx->msi.msg[msi].address_lo;
+		gsi_ctx->msi_addr_set = true;
+	}
+
+	GSIDBG("saved msi %u msg data %u addr 0x%08x%08x, MSI:0x%lx\n", msi,
+		msg->data, msg->address_hi, msg->address_lo, gsi_ctx->msi_addr);
+}
+
+static int __gsi_request_msi_irq(unsigned long msi)
+{
+	int result = 0;
+
+	/* Ensure this is not already allocated */
+	if (test_bit((int)msi, gsi_ctx->msi.allocated)) {
+		GSIERR("MSI %lu already allocated\n", msi);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/* Request MSI IRQ
+	 * NOTE: During the call to devm_request_irq, the
+	 * __gsi_msi_write_msg callback is triggered.
+	 */
+	result = devm_request_irq(gsi_ctx->dev, gsi_ctx->msi.irq[msi],
+			(irq_handler_t)gsi_msi_isr, IRQF_TRIGGER_NONE,
+			"gsi_msi", gsi_ctx);
+
+	if (result) {
+		GSIERR("failed to register msi irq %u idx %lu\n",
+			gsi_ctx->msi.irq[msi], msi);
+		return -GSI_STATUS_ERROR;
+	}
+
+	set_bit(msi, gsi_ctx->msi.allocated);
+	return result;
+}
+
+static int __gsi_allocate_msis(void)
+{
+	int result = 0;
+	struct msi_desc *desc = NULL;
+	size_t size = 0;
+
+	/* Allocate all MSIs */
+	GSIDBG("gsi_ctx->dev = %p, gsi_ctx->msi.num = %u", gsi_ctx->dev, gsi_ctx->msi.num);
+	result = platform_msi_domain_alloc_irqs(gsi_ctx->dev, gsi_ctx->msi.num,
+			__gsi_msi_write_msg);
+	if (result) {
+		GSIERR("error allocating platform MSIs - %d\n", result);
+		return -GSI_STATUS_ERROR;
+	}
+	GSIDBG("MSI allocation successful\n");
+
+	/* Loop through the allocated MSIs and save the info, then
+	 * request the IRQ.
+	 */
+	for_each_msi_entry(desc, gsi_ctx->dev) {
+		unsigned long msi = desc->platform.msi_index;
+
+		/* Ensure a valid index */
+		if (msi >= gsi_ctx->msi.num) {
+			GSIERR("error invalid MSI %lu\n", msi);
+			result = -GSI_STATUS_ERROR;
+			goto err_free_msis;
+		}
+
+		/* Save IRQ */
+		gsi_ctx->msi.irq[msi] = desc->irq;
+		GSIDBG("desc->irq =%d\n", desc->irq);
+
+		/* Request the IRQ */
+		if (__gsi_request_msi_irq(msi)) {
+			GSIERR("error requesting IRQ for MSI %lu\n",
+				msi);
+			result = -GSI_STATUS_ERROR;
+			goto err_free_msis;
+		}
+		GSIDBG("IRQ request successful\n");
+	}
+
+	return result;
+
+err_free_msis:
+	size = sizeof(unsigned long) * BITS_TO_LONGS(gsi_ctx->msi.num);
+	platform_msi_domain_free_irqs(gsi_ctx->dev);
+	memset(gsi_ctx->msi.allocated, 0, size);
+
+	return result;
+}
+
 int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 {
 	int res;
+	int result = GSI_STATUS_SUCCESS;
 	struct gsihal_reg_gsi_status gsi_status;
 	struct gsihal_reg_gsi_ee_n_cntxt_gsi_irq gen_irq;
 
@@ -1415,14 +1533,24 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
+	/* If MSIs are enabled, make sure they are set up */
+	if (gsi_ctx->msi.num) {
+		if (__gsi_allocate_msis()) {
+			GSIERR("failed to allocate MSIs\n");
+			goto err_free_irq;
+		}
+	}
+
 	/*
 	 * If base not previously mapped via gsi_map_base(), map it
 	 * now...
 	 */
 	if (!gsi_ctx->base) {
 		res = gsi_map_base(props->phys_addr, props->size, props->ver);
-		if (res)
-			return res;
+		if (res) {
+			result = res;
+			goto err_free_msis;
+		}
 	}
 
 	if (running_emulation) {
@@ -1444,7 +1572,8 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 			  "failed to remap emulator's interrupt controller HW\n");
 			gsi_unmap_base();
 			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
-			return -GSI_STATUS_RES_ALLOC_FAILURE;
+			result = -GSI_STATUS_RES_ALLOC_FAILURE;
+			goto err_iounmap;
 		}
 
 		GSIDBG(
@@ -1470,7 +1599,8 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
 		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 		GSIERR("failed to get max channels\n");
-		return -GSI_STATUS_ERROR;
+		result = -GSI_STATUS_ERROR;
+		goto err_iounmap;
 	}
 	gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
 	if (gsi_ctx->max_ev == 0) {
@@ -1480,12 +1610,14 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 		gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
 		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 		GSIERR("failed to get max event rings\n");
-		return -GSI_STATUS_ERROR;
+		result = -GSI_STATUS_ERROR;
+		goto err_iounmap;
 	}
 
 	if (gsi_ctx->max_ev > GSI_EVT_RING_MAX) {
 		GSIERR("max event rings are beyond absolute maximum\n");
-		return -GSI_STATUS_ERROR;
+		result = -GSI_STATUS_ERROR;
+		goto err_iounmap;
 	}
 
 	if (props->mhi_er_id_limits_valid &&
@@ -1497,7 +1629,8 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 		GSIERR("MHI event ring start id %u is beyond max %u\n",
 			props->mhi_er_id_limits[0], gsi_ctx->max_ev);
-		return -GSI_STATUS_ERROR;
+		result = -GSI_STATUS_ERROR;
+		goto err_iounmap;
 	}
 
 	gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
@@ -1566,19 +1699,34 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
 		res = setup_emulator_cntrlr(
 		    gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size);
 		if (res != 0) {
-			gsi_unmap_base();
-			devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
-			gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
-			devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
 			GSIERR("setup_emulator_cntrlr() failed\n");
-			return res;
+			result = res;
+			goto err_iounmap;
 		}
 	}
 
 	*dev_hdl = (uintptr_t)gsi_ctx;
 	gsi_ctx->gsi_isr_cache_index = 0;
 
-	return GSI_STATUS_SUCCESS;
+	return result;
+err_iounmap:
+	gsi_unmap_base();
+	if (running_emulation && gsi_ctx->intcntrlr_base != NULL)
+		devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base);
+	gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL;
+
+err_free_msis:
+	if (gsi_ctx->msi.num) {
+		size_t size =
+			sizeof(unsigned long) * BITS_TO_LONGS(gsi_ctx->msi.num);
+		platform_msi_domain_free_irqs(gsi_ctx->dev);
+		memset(gsi_ctx->msi.allocated, 0, size);
+	}
+
+err_free_irq:
+	devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+
+	return result;
 }
 EXPORT_SYMBOL(gsi_register_device);
 
@@ -1678,6 +1826,9 @@ int gsi_deregister_device(unsigned long dev_hdl, bool force)
 	__gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0);
 	__gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0);
 
+	if (gsi_ctx->msi.num) {
+		platform_msi_domain_free_irqs(gsi_ctx->dev);
+		/* Reset MSI bookkeeping so a later gsi_register_device()
+		 * does not see stale allocated/used bits.
+		 */
+		memset(gsi_ctx->msi.allocated, 0, sizeof(gsi_ctx->msi.allocated));
+		memset(gsi_ctx->msi.used, 0, sizeof(gsi_ctx->msi.used));
+	}
+
 	devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
 	gsihal_destroy();
 	gsi_unmap_base();
@@ -1946,6 +2097,49 @@ static inline uint64_t gsi_read_event_ring_rp_reg(struct gsi_evt_ring_props* pro
 	return gsihal_read_reg_nk(GSI_EE_n_EV_CH_k_CNTXT_4, ee, id);
 }
 
+static int __gsi_pair_msi(struct gsi_evt_ctx *ctx,
+		struct gsi_evt_ring_props *props)
+{
+	int result = GSI_STATUS_SUCCESS;
+	unsigned long msi = 0;
+
+	if (IS_ERR_OR_NULL(ctx) || IS_ERR_OR_NULL(props) || IS_ERR_OR_NULL(gsi_ctx))
+		BUG();
+
+	/* Find the first unused MSI */
+	msi = find_first_zero_bit(gsi_ctx->msi.used, gsi_ctx->msi.num);
+	if (msi >= gsi_ctx->msi.num) {
+		GSIERR("No free MSIs for evt %u\n", ctx->id);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/* Ensure it's been allocated */
+	if (!test_bit((int)msi, gsi_ctx->msi.allocated)) {
+		GSIDBG("MSI %lu not allocated\n", msi);
+		return -GSI_STATUS_ERROR;
+	}
+
+	/* Save the event ID for later lookup */
+	gsi_ctx->msi.evt[msi] = ctx->id;
+
+	/* Add this event to the IRQ mask */
+	set_bit((int)ctx->id, &gsi_ctx->msi.mask);
+
+	props->intvec = gsi_ctx->msi.msg[msi].data;
+	props->msi_addr = (uint64_t)gsi_ctx->msi.msg[msi].address_hi << 32 |
+			(uint64_t)gsi_ctx->msi.msg[msi].address_lo;
+
+	GSIDBG("props->intvec = %u, props->msi_addr = 0x%llx\n", props->intvec, (unsigned long long)props->msi_addr);
+
+	if (props->msi_addr == 0)
+		BUG();
+
+	/* Mark MSI as used */
+	set_bit(msi, gsi_ctx->msi.used);
+
+	return result;
+}
+
 int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
 		unsigned long *evt_ring_hdl)
 {
@@ -2008,25 +2202,25 @@ int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
 	init_completion(&ctx->compl);
 	atomic_set(&ctx->chan_ref_cnt, 0);
 	ctx->num_of_chan_allocated = 0;
-	ctx->props = *props;
+	ctx->id = evt_id;
 
-	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
-		ctx->props.intr == GSI_INTR_MSI) {
-		GSIERR("Registering MSI Interrupt for intvec = %d\n",
-			ctx->props.intvec);
-		res = devm_request_irq(gsi_ctx->dev, ctx->props.msi_irq,
-				gsi_msi_isr,
-				IRQF_TRIGGER_HIGH,
-				"gsi",
-				ctx);
-		if (res) {
-			GSIERR("MSI interrupt reg fails res = %d, intvec = %d\n",
-				res, ctx->props.intvec);
-			GSI_ASSERT();
+	mutex_lock(&gsi_ctx->mlock);
+	/* Pair an MSI with this event if this is an MSI and GPI event channel
+	 * NOTE: This modifies props, so must be before props are saved to ctx.
+	 */
+	if (props->intf == GSI_EVT_CHTYPE_GPI_EV &&
+		props->intr == GSI_INTR_MSI) {
+		if (__gsi_pair_msi(ctx, props)) {
+			GSIERR("evt_id=%lu failed to pair MSI\n", evt_id);
+			if (!props->evchid_valid)
+				clear_bit(evt_id, &gsi_ctx->evt_bmap);
+			mutex_unlock(&gsi_ctx->mlock);
+			return -GSI_STATUS_NODEV;
 		}
+		GSIDBG("evt_id=%lu pair MSI successful\n", evt_id);
 	}
+	ctx->props = *props;
 
-	mutex_lock(&gsi_ctx->mlock);
 	ee = gsi_ctx->per.ee;
 	ev_ch_cmd.opcode = op;
 	ev_ch_cmd.chid = evt_id;
@@ -2144,6 +2338,7 @@ int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
 	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC;
 	struct gsi_evt_ctx *ctx;
 	int res = 0;
+	u32 msi;
 
 	if (!gsi_ctx) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
@@ -2169,10 +2364,20 @@ int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
+	/* Unpair the MSI */
 	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV &&
 		ctx->props.intr == GSI_INTR_MSI) {
 		GSIERR("Interrupt dereg for msi_irq = %d\n", ctx->props.msi_irq);
-		devm_free_irq(gsi_ctx->dev, ctx->props.msi_irq, ctx);
+
+		for (msi = 0; msi < gsi_ctx->msi.num; msi++) {
+			if (gsi_ctx->msi.msg[msi].data == ctx->props.intvec) {
+				mutex_lock(&gsi_ctx->mlock);
+				clear_bit(msi, gsi_ctx->msi.used);
+				gsi_ctx->msi.evt[msi] = 0;
+				clear_bit(evt_ring_hdl, &gsi_ctx->msi.mask);
+				mutex_unlock(&gsi_ctx->mlock);
+			}
+		}
 	}
 
 	mutex_lock(&gsi_ctx->mlock);
@@ -4263,7 +4468,6 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 	if (curr == GSI_CHAN_MODE_CALLBACK &&
 			mode == GSI_CHAN_MODE_POLL) {
 		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
-			/* Masking/Unmasking of intrpts is not allowed for MSI chanls */
 			if (ctx->evtr->props.intr != GSI_INTR_MSI) {
 				__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
 				gsihal_get_ch_reg_idx(ctx->evtr->id),
@@ -4314,7 +4518,6 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 				atomic_set(&coal_ctx->poll_mode, mode);
 		}
 		if (gsi_ctx->per.ver >= GSI_VER_3_0) {
-			/* Masking/Unmasking of intrpts is not allowed for MSI chanls */
 			if (ctx->evtr->props.intr != GSI_INTR_MSI) {
 				__gsi_config_ieob_irq_k(gsi_ctx->per.ee,
 				gsihal_get_ch_reg_idx(ctx->evtr->id),
@@ -5264,6 +5467,24 @@ int gsi_query_msi_addr(unsigned long chan_hdl, phys_addr_t *addr)
 }
 EXPORT_SYMBOL(gsi_query_msi_addr);
 
+int gsi_query_device_msi_addr(u64 *addr)
+{
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (gsi_ctx->msi_addr_set)
+		*addr = gsi_ctx->msi_addr;
+	else
+		*addr = 0;
+
+	GSIDBG("Device MSI Addr: 0x%llx", *addr);
+	return 0;
+}
+EXPORT_SYMBOL(gsi_query_device_msi_addr);
+
+
 uint64_t gsi_read_event_ring_wp(int evtr_id, int ee)
 {
 	uint64_t wp;
@@ -5506,6 +5727,7 @@ EXPORT_SYMBOL(gsi_update_almst_empty_thrshold);
 static int msm_gsi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	int result;
 
 	pr_debug("gsi_probe\n");
 	gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL);
@@ -5519,6 +5741,17 @@ static int msm_gsi_probe(struct platform_device *pdev)
 	if (gsi_ctx->ipc_logbuf == NULL)
 		GSIERR("failed to create IPC log, continue...\n");
 
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,num-msi",
+			&gsi_ctx->msi.num);
+	if (result)
+		GSIERR("No MSIs configured\n");
+	else {
+		if (gsi_ctx->msi.num > GSI_MAX_NUM_MSI) {
+			GSIERR("Num MSIs %u larger than max %u, normalizing\n",
+				gsi_ctx->msi.num, GSI_MAX_NUM_MSI);
+			gsi_ctx->msi.num = GSI_MAX_NUM_MSI;
+		} else {
+			GSIDBG("Num MSIs=%u\n", gsi_ctx->msi.num);
+		}
+	}
+
 	gsi_ctx->dev = dev;
 	init_completion(&gsi_ctx->gen_ee_cmd_compl);
 	gsi_debugfs_init();

+ 25 - 1
drivers/platform/msm/gsi/gsi.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
 #ifndef GSI_H
@@ -15,6 +15,7 @@
 #include <linux/errno.h>
 #include <linux/ipc_logging.h>
 #include <linux/iommu.h>
+#include <linux/msi.h>
 
 /*
  * The following for adding code (ie. for EMULATION) not found on x86.
@@ -74,6 +75,7 @@
 	} while (0)
 
 #define GSI_IPC_LOG_PAGES 50
+#define GSI_MAX_NUM_MSI 2
 
 enum gsi_ver {
 	GSI_VER_ERR = 0,
@@ -1430,6 +1432,16 @@ struct gsi_log_ts {
 	u32 interrupt_type;
 };
 
+struct gsi_msi {
+	u32 num;
+	DECLARE_BITMAP(allocated, GSI_MAX_NUM_MSI);
+	DECLARE_BITMAP(used, GSI_MAX_NUM_MSI);
+	struct msi_msg msg[GSI_MAX_NUM_MSI];
+	u32 irq[GSI_MAX_NUM_MSI];
+	u32 evt[GSI_MAX_NUM_MSI];
+	unsigned long mask;
+};
+
 struct gsi_ctx {
 	void __iomem *base;
 	struct device *dev;
@@ -1454,6 +1466,9 @@ struct gsi_ctx {
 	void *ipc_logbuf;
 	void *ipc_logbuf_low;
 	struct gsi_coal_chan_info coal_info;
+	bool msi_addr_set;
+	uint64_t msi_addr;
+	struct gsi_msi msi;
 	/*
 	 * The following used only on emulation systems.
 	 */
@@ -2357,6 +2372,15 @@ int gsi_enable_flow_control_ee(unsigned int chan_idx, unsigned int ee,
 */
 int gsi_query_msi_addr(unsigned long chan_hdl, phys_addr_t *addr);
 
+/**
+* gsi_query_device_msi_addr - get gsi device msi address
+*
+* @addr: [out] msi address
+*
+* @Return gsi_status
+*/
+int gsi_query_device_msi_addr(u64 *addr);
+
 /**
 * gsi_update_almst_empty_thrshold - update almst_empty_thrshold
 *

+ 6 - 81
drivers/platform/msm/ipa/ipa_v3/ipa.c

@@ -7284,7 +7284,7 @@ void ipa3_notify_clients_registered(void)
 }
 EXPORT_SYMBOL(ipa3_notify_clients_registered);
 
-void ipa_gsi_map_unmap_gsi_msi_addr(bool map)
+static void ipa_gsi_map_unmap_gsi_msi_addr(bool map)
 {
 	struct ipa_smmu_cb_ctx *cb;
 	u64 rounddown_addr;
@@ -7300,21 +7300,14 @@ void ipa_gsi_map_unmap_gsi_msi_addr(bool map)
 			IPAERR("iommu mapping failed for gsi_msi_addr\n");
 			ipa_assert();
 		}
-		ipa3_ctx->gsi_msi_clear_addr_io_mapped =
-			(u64)ioremap(ipa3_ctx->gsi_msi_clear_addr, 4);
-		ipa3_ctx->gsi_msi_addr_io_mapped =
-			(u64)ioremap(ipa3_ctx->gsi_msi_addr, 4);
 	} else {
-		iounmap((int *) ipa3_ctx->gsi_msi_clear_addr_io_mapped);
-		iounmap((int *) ipa3_ctx->gsi_msi_addr_io_mapped);
 		res = iommu_unmap(cb->iommu_domain, rounddown_addr, PAGE_SIZE);
-		ipa3_ctx->gsi_msi_clear_addr_io_mapped = 0;
-		ipa3_ctx->gsi_msi_addr_io_mapped = 0;
 		if (res)
 			IPAERR("smmu unmap for gsi_msi_addr failed %d\n", res);
 	}
 }
 
+
 /**
  * ipa3_post_init() - Initialize the IPA Driver (Part II).
  * This part contains all initialization which requires interaction with
@@ -7703,11 +7696,11 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 
 	ipa_ut_module_init();
 
+	/* Query MSI address. */
+	gsi_query_device_msi_addr(&ipa3_ctx->gsi_msi_addr);
 	/* Map the MSI addresses for the GSI to access, for LL and QMAP FC pipe */
-	if (!ipa3_ctx->gsi_msi_addr_io_mapped &&
-		!ipa3_ctx->gsi_msi_clear_addr_io_mapped &&
-		(ipa3_ctx->rmnet_ll_enable || ipa3_ctx->rmnet_ctl_enable))
-			ipa_gsi_map_unmap_gsi_msi_addr(true);
+	if (ipa3_ctx->gsi_msi_addr)
+		ipa_gsi_map_unmap_gsi_msi_addr(true);
 
 	if(!ipa_spearhead_stats_init())
 		IPADBG("Fail to init spearhead ipa lnx module");
@@ -8612,18 +8605,6 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 	ipa3_ctx->ipa_gpi_event_rp_ddr = resource_p->ipa_gpi_event_rp_ddr;
 	ipa3_ctx->rmnet_ctl_enable = resource_p->rmnet_ctl_enable;
 	ipa3_ctx->rmnet_ll_enable = resource_p->rmnet_ll_enable;
-	ipa3_ctx->gsi_msi_addr = resource_p->gsi_msi_addr;
-	ipa3_ctx->gsi_msi_addr_io_mapped = 0;
-	ipa3_ctx->gsi_msi_clear_addr_io_mapped = 0;
-	ipa3_ctx->gsi_msi_clear_addr = resource_p->gsi_msi_clear_addr;
-	ipa3_ctx->gsi_rmnet_ctl_evt_ring_intvec =
-		resource_p->gsi_rmnet_ctl_evt_ring_intvec;
-	ipa3_ctx->gsi_rmnet_ctl_evt_ring_irq =
-		resource_p->gsi_rmnet_ctl_evt_ring_irq;
-	ipa3_ctx->gsi_rmnet_ll_evt_ring_intvec =
-		resource_p->gsi_rmnet_ll_evt_ring_intvec;
-	ipa3_ctx->gsi_rmnet_ll_evt_ring_irq =
-		resource_p->gsi_rmnet_ll_evt_ring_irq;
 	ipa3_ctx->tx_wrapper_cache_max_size = get_tx_wrapper_cache_size(
 			resource_p->tx_wrapper_cache_max_size);
 	ipa3_ctx->ipa_config_is_auto = resource_p->ipa_config_is_auto;
@@ -9472,10 +9453,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 	u32 ipa_holb_monitor_max_cnt_usb;
 	u32 ipa_holb_monitor_max_cnt_11ad;
 	u32 ipa_wan_aggr_pkt_cnt;
-	u32 gsi_msi_addr;
-	u32 gsi_msi_clear_addr;
-	u32 gsi_rmnet_ctl_evt_ring_intvec;
-	u32 gsi_rmnet_ll_evt_ring_intvec;
 
 	/* initialize ipa3_res */
 	ipa_drv_res->ipa_wdi3_2g_holb_timeout = 0;
@@ -9816,58 +9793,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 			? "True" : "False");
 	}
 
-	result = of_property_read_u32(pdev->dev.of_node,
-		"qcom,gsi-msi-addr",
-		&gsi_msi_addr);
-	IPADBG("GSI MSI addr = %lu\n", gsi_msi_addr);
-	ipa_drv_res->gsi_msi_addr = (u64)gsi_msi_addr;
-
-	result = of_property_read_u32(pdev->dev.of_node,
-		"qcom,gsi-msi-clear-addr",
-		&gsi_msi_clear_addr);
-	IPADBG("GSI MSI clear addr = %lu\n", gsi_msi_clear_addr);
-	ipa_drv_res->gsi_msi_clear_addr = (u64)gsi_msi_clear_addr;
-
-	/* Get IPA MSI IRQ number for rmnet_ctl */
-	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-		"msi-irq-rmnet-ctl");
-	if (!resource) {
-		ipa_drv_res->gsi_rmnet_ctl_evt_ring_irq = 0;
-		IPAERR(":get resource failed for msi-irq-rmnet-ctl\n");
-	} else {
-		ipa_drv_res->gsi_rmnet_ctl_evt_ring_irq = resource->start;
-		IPADBG(": msi-irq-rmnet-ctl = %d\n",
-			ipa_drv_res->gsi_rmnet_ctl_evt_ring_irq);
-	}
-
-	/* Get IPA MSI IRQ number for rmnet_ll */
-	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-		"msi-irq-rmnet-ll");
-	if (!resource) {
-		ipa_drv_res->gsi_rmnet_ll_evt_ring_irq = 0;
-		IPAERR(":get resource failed for msi-irq-rmnet-ll\n");
-	} else {
-		ipa_drv_res->gsi_rmnet_ll_evt_ring_irq = resource->start;
-		IPADBG(": msi-irq-rmnet-ll = %d\n",
-			ipa_drv_res->gsi_rmnet_ll_evt_ring_irq);
-	}
-
-	result = of_property_read_u32(pdev->dev.of_node,
-		"qcom,gsi-rmnet-ctl-evt-ring-intvec",
-		&gsi_rmnet_ctl_evt_ring_intvec);
-	IPADBG("gsi_rmnet_ctl_evt_ring_intvec = %u\n",
-		gsi_rmnet_ctl_evt_ring_intvec);
-	ipa_drv_res->gsi_rmnet_ctl_evt_ring_intvec =
-		gsi_rmnet_ctl_evt_ring_intvec;
-
-	result = of_property_read_u32(pdev->dev.of_node,
-		"qcom,gsi-rmnet-ll-evt-ring-intvec",
-		&gsi_rmnet_ll_evt_ring_intvec);
-	IPADBG("gsi_rmnet_ll_evt_ring_intvec = %u\n",
-		gsi_rmnet_ll_evt_ring_intvec);
-	ipa_drv_res->gsi_rmnet_ll_evt_ring_intvec =
-		gsi_rmnet_ll_evt_ring_intvec;
-
 	result = of_property_read_string(pdev->dev.of_node,
 			"qcom,use-gsi-ipa-fw", &ipa_drv_res->gsi_fw_file_name);
 	if (!result)

+ 5 - 19
drivers/platform/msm/ipa/ipa_v3/ipa_dp.c

@@ -5210,25 +5210,11 @@ static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
 	evt_rp_dma_addr = 0;
 	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
 	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
-	if ((ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) &&
-		ipa3_ctx->gsi_rmnet_ll_evt_ring_irq) {
-		gsi_evt_ring_props.intr = GSI_INTR_MSI;
-		gsi_evt_ring_props.msi_addr = ipa3_ctx->gsi_msi_addr;
-		gsi_evt_ring_props.msi_clear_addr = ipa3_ctx->gsi_msi_clear_addr_io_mapped;
-		gsi_evt_ring_props.msi_addr_iore_mapped = ipa3_ctx->gsi_msi_addr_io_mapped;
-		gsi_evt_ring_props.intvec = ipa3_ctx->gsi_rmnet_ll_evt_ring_intvec;
-		gsi_evt_ring_props.msi_irq = ipa3_ctx->gsi_rmnet_ll_evt_ring_irq;
-	} else if ((ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS) &&
-		ipa3_ctx->gsi_rmnet_ctl_evt_ring_irq) {
-		gsi_evt_ring_props.intr = GSI_INTR_MSI;
-		gsi_evt_ring_props.msi_addr = ipa3_ctx->gsi_msi_addr;
-		gsi_evt_ring_props.msi_clear_addr = ipa3_ctx->gsi_msi_clear_addr_io_mapped;
-		gsi_evt_ring_props.msi_addr_iore_mapped = ipa3_ctx->gsi_msi_addr_io_mapped;
-		gsi_evt_ring_props.intvec = ipa3_ctx->gsi_rmnet_ctl_evt_ring_intvec;
-		gsi_evt_ring_props.msi_irq = ipa3_ctx->gsi_rmnet_ctl_evt_ring_irq;
-	} else {
-		gsi_evt_ring_props.intr = GSI_INTR_IRQ;
-	}
+	if ((ipa3_ctx->gsi_msi_addr) &&
+		(ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS ||
+		ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS))
+		gsi_evt_ring_props.intr = GSI_INTR_MSI; /* intvec chosen dynamically */
+	else
+		gsi_evt_ring_props.intr = GSI_INTR_IRQ;
 	gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
 	gsi_evt_ring_props.ring_len = ring_size;
 	gsi_evt_ring_props.ring_base_vaddr =

+ 1 - 22
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -2100,13 +2100,6 @@ struct ipa3_eth_error_stats {
  * @uc_fw_file_name: uC IPA fw file name
  * @eth_info: ethernet client mapping
  * @max_num_smmu_cb: number of smmu s1 cb supported
- * @u64 gsi_msi_addr: MSI SPI set address APSS_GICA_SETSPI_NSR
- * @u64 gsi_msi_clear_addr: MSI SPI clear address APSS_GICA_CLRSPI_NSR
- * @u64 gsi_msi_ioremapped_addr: iore mapped address for debugging purpose
- * @u32 gsi_rmnet_ctl_evt_ring_irq: IRQ number for rmnet_ctl pipe
- * @u32 gsi_rmnet_ll_evt_ring_irq; IRQ number for rmnet_ll pipe
- * @u32 gsi_rmnet_ctl_evt_ring_intvec: HW IRQ number for rmnet_ctl pipe
- * @u32 gsi_rmnet_ll_evt_ring_intvec; HW IRQ number for rmnet_ll pipe
  * @non_hash_flt_lcl_sys_switch: number of times non-hash flt table moved
  */
 struct ipa3_context {
@@ -2332,19 +2325,12 @@ struct ipa3_context {
 	u8 page_poll_threshold;
 	u32 non_hash_flt_lcl_sys_switch;
 	bool wan_common_page_pool;
-	u64 gsi_msi_addr;
-	u64 gsi_msi_clear_addr;
-	u64 gsi_msi_addr_io_mapped;
-	u64 gsi_msi_clear_addr_io_mapped;
-	u32 gsi_rmnet_ctl_evt_ring_intvec;
-	u32 gsi_rmnet_ctl_evt_ring_irq;
-	u32 gsi_rmnet_ll_evt_ring_intvec;
-	u32 gsi_rmnet_ll_evt_ring_irq;
 	bool use_tput_est_ep;
 	struct ipa_ioc_eogre_info eogre_cache;
 	bool eogre_enabled;
 	bool is_device_crashed;
 	bool ulso_wa;
+	u64 gsi_msi_addr;
 };
 
 struct ipa3_plat_drv_res {
@@ -2423,12 +2409,6 @@ struct ipa3_plat_drv_res {
 	u16 ulso_ip_id_min;
 	u16 ulso_ip_id_max;
 	bool use_pm_wrapper;
-	u64 gsi_msi_addr;
-	u64 gsi_msi_clear_addr;
-	u32 gsi_rmnet_ctl_evt_ring_intvec;
-	u32 gsi_rmnet_ctl_evt_ring_irq;
-	u32 gsi_rmnet_ll_evt_ring_intvec;
-	u32 gsi_rmnet_ll_evt_ring_irq;
 	bool use_tput_est_ep;
 	bool ulso_wa;
 };
@@ -3414,7 +3394,6 @@ irq_handler_t ipa3_get_isr(void);
 void ipa_pc_qmp_enable(void);
 u32 ipa3_get_r_rev_version(void);
 void ipa3_notify_clients_registered(void);
-void ipa_gsi_map_unmap_gsi_msi_addr(bool map);
 #if defined(CONFIG_IPA3_REGDUMP)
 int ipa_reg_save_init(u32 value);
 void ipa_save_registers(void);