
Merge "asoc: enable MDF initialization in qcs405 target"

Linux Build Service Account 6 years ago
parent
commit
602e037449

+ 7 - 0
asoc/qcs405.c

@@ -31,6 +31,7 @@
 #include <dsp/audio_notifier.h>
 #include <dsp/q6afe-v2.h>
 #include <dsp/q6core.h>
+#include <dsp/msm_mdf.h>
 #include "device_event.h"
 #include "msm-pcm-routing-v2.h"
 #include "codecs/msm-cdc-pinctrl.h"
@@ -8402,6 +8403,11 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
 	dev_info(&pdev->dev, "Sound card %s registered\n", card->name);
 	spdev = pdev;
 
+	ret = msm_mdf_mem_init();
+	if (ret)
+		dev_err(&pdev->dev, "msm_mdf_mem_init failed (%d)\n",
+			 ret);
+
 	/* Parse pinctrl info from devicetree */
 	ret = msm_get_pinctrl(pdev);
 	if (!ret) {
@@ -8426,6 +8432,7 @@ static int msm_asoc_machine_remove(struct platform_device *pdev)
 {
 	audio_notifier_deregister("qcs405");
 	msm_i2s_auxpcm_deinit();
+	msm_mdf_mem_deinit();
 
 	msm_release_pinctrl(pdev);
 	return 0;

+ 1 - 0
config/qcs405auto.conf

@@ -31,6 +31,7 @@ CONFIG_DTS_EAGLE=m
 CONFIG_DOLBY_DS2=m
 CONFIG_DOLBY_LICENSE=m
 CONFIG_DTS_SRS_TM=m
+CONFIG_MSM_MDF=m
 CONFIG_SND_SOC_MSM_STUB=m
 CONFIG_MSM_AVTIMER=m
 CONFIG_SND_SOC_MSM_HDMI_CODEC_RX=m

+ 1 - 0
config/qcs405autoconf.h

@@ -44,6 +44,7 @@
 #define CONFIG_DOLBY_LICENSE 1
 #define CONFIG_DTS_SRS_TM 1
 #define CONFIG_SND_SOC_MSM_STUB 1
+#define CONFIG_MSM_MDF 1
 #define CONFIG_MSM_AVTIMER 1
 #define CONFIG_SND_SOC_MSM_HDMI_CODEC_RX 1
 #define CONFIG_SND_SOC_EP92 1

+ 4 - 0
dsp/Kbuild

@@ -123,6 +123,10 @@ ifdef CONFIG_MSM_ULTRASOUND
 USF_OBJS += usf.o usfcdev.o q6usm.o
 endif
 
+ifdef CONFIG_MSM_MDF
+	Q6_OBJS += msm_mdf.o
+endif
+
 LINUX_INC +=	-Iinclude/linux
 
 INCS +=		$(COMMON_INC) \

+ 22 - 0
dsp/msm_audio_ion.c

@@ -220,6 +220,28 @@ err:
 	return rc;
 }
 
+int msm_audio_ion_get_smmu_info(struct device **cb_dev,
+		u64 *smmu_sid)
+{
+	if (!cb_dev || !smmu_sid) {
+		pr_err("%s: Invalid params\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!msm_audio_ion_data.cb_dev ||
+		!msm_audio_ion_data.smmu_sid_bits) {
+		pr_err("%s: Params not initialized\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	*cb_dev = msm_audio_ion_data.cb_dev;
+	*smmu_sid = msm_audio_ion_data.smmu_sid_bits;
+
+	return 0;
+}
+
 static void *msm_audio_ion_map_kernel(struct dma_buf *dma_buf)
 {
 	int rc = 0;

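For context, here is a minimal sketch (not part of this change; the helper name and error handling are illustrative) of how a client such as the new MDF driver is expected to consume msm_audio_ion_get_smmu_info(): fetch the LPASS context-bank device and the already-shifted SMMU SID bits, map a kernel buffer through that device, and OR the SID bits into the upper bits of the returned IOVA.

#include <linux/dma-mapping.h>
#include <dsp/msm_audio_ion.h>

/* Illustrative only: map @vaddr of @size bytes through the LPASS SMMU
 * context bank and return an IOVA carrying the SMMU SID in its upper
 * bits (audio ION hands back the SID bits pre-shifted, so they can be
 * ORed in directly, as msm_mdf.c below does).
 */
static int example_map_for_adsp(void *vaddr, size_t size, dma_addr_t *iova)
{
	struct device *cb_dev;
	u64 sid_bits;
	int rc;

	rc = msm_audio_ion_get_smmu_info(&cb_dev, &sid_bits);
	if (rc)
		return rc;

	*iova = dma_map_single(cb_dev, vaddr, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(cb_dev, *iova))
		return -ENOMEM;

	*iova |= sid_bits;
	return 0;
}
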
+ 637 - 0
dsp/msm_mdf.c

@@ -0,0 +1,637 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/ion_kernel.h>
+#include <linux/msm_ion.h>
+#include <dsp/msm_audio_ion.h>
+#include <ipc/apr.h>
+#include <dsp/msm_mdf.h>
+#include <asm/dma-iommu.h>
+#include <soc/qcom/secure_buffer.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <dsp/q6audio-v2.h>
+#include <dsp/q6core.h>
+
+#define VMID_SSC_Q6     5
+#define VMID_LPASS      6
+#define VMID_MSS_MSA    15
+#define VMID_CDSP	8
+
+#define MSM_MDF_PROBED         (1 << 0)
+#define MSM_MDF_INITIALIZED    (1 << 1)
+#define MSM_MDF_MEM_ALLOCATED  (1 << 2)
+#define MSM_MDF_MEM_MAPPED     (1 << 3)
+#define MSM_MDF_MEM_PERMISSION (1 << 4) /* 0 - HLOS, 1 - Subsys */
+
+/* TODO: Update IOVA range for subsys SMMUs */
+#define MSM_MDF_IOVA_START 0x80000000
+#define MSM_MDF_IOVA_LEN 0x800000
+
+#define MSM_MDF_SMMU_SID_OFFSET 32
+
+#define ADSP_STATE_READY_TIMEOUT_MS 3000
+
+enum {
+	SUBSYS_ADSP, /* Audio DSP must have index 0 */
+	SUBSYS_SCC,  /* Sensor DSP */
+	SUBSYS_MSS,  /* Modem DSP */
+	SUBSYS_CDSP, /* Compute DSP */
+	SUBSYS_MAX,
+};
+
+struct msm_mdf_mem {
+	struct device *dev;
+	uint8_t device_status;
+	uint32_t map_handle;
+	struct dma_buf *dma_buf;
+	dma_addr_t dma_addr;
+	size_t size;
+	void *va;
+};
+
+static struct msm_mdf_mem mdf_mem_data = {NULL,};
+
+struct msm_mdf_smmu {
+	bool enabled;
+	char *subsys;
+	int vmid;
+	uint32_t proc_id;
+	struct device *cb_dev;
+	uint8_t device_status;
+	uint64_t sid;
+	struct dma_iommu_mapping *mapping;
+	dma_addr_t pa;
+	size_t pa_len;
+};
+
+static struct msm_mdf_smmu mdf_smmu_data[SUBSYS_MAX] = {
+	{
+		.subsys = "adsp",
+		.vmid = VMID_LPASS,
+	},
+	{
+		.subsys = "dsps",
+		.vmid = VMID_SSC_Q6,
+		.proc_id = AVS_MDF_SSC_PROC_ID,
+	},
+	{
+		.subsys = "modem",
+		.vmid = VMID_MSS_MSA,
+		.proc_id = AVS_MDF_MDSP_PROC_ID,
+	},
+	{
+		.subsys = "cdsp",
+		.vmid = VMID_CDSP,
+		.proc_id = AVS_MDF_CDSP_PROC_ID,
+	},
+};
+
+static void *ssr_handle;
+
+static inline uint64_t buf_page_start(uint64_t buf)
+{
+	uint64_t start = (uint64_t) buf & PAGE_MASK;
+	return start;
+}
+
+static inline uint64_t buf_page_offset(uint64_t buf)
+{
+	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
+	return offset;
+}
+
+static inline int buf_num_pages(uint64_t buf, ssize_t len)
+{
+	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
+	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
+	int nPages = end - start + 1;
+	return nPages;
+}
+
+static inline uint64_t buf_page_size(uint32_t size)
+{
+	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
+}
+
+static inline void *uint64_to_ptr(uint64_t addr)
+{
+	void *ptr = (void *)((uintptr_t)addr);
+	return ptr;
+}
+
+static inline uint64_t ptr_to_uint64(void *ptr)
+{
+	uint64_t addr = (uint64_t)((uintptr_t)ptr);
+	return addr;
+}
+
+static int msm_mdf_dma_buf_map(struct msm_mdf_mem *mem,
+			       struct msm_mdf_smmu *smmu)
+{
+	int rc = 0;
+
+	if (!smmu)
+		return -EINVAL;
+	if (smmu->device_status & MSM_MDF_MEM_MAPPED)
+		return 0;
+	if (smmu->enabled) {
+		if (smmu->cb_dev == NULL) {
+			pr_err("%s: cb device is not initialized\n",
+				__func__);
+			/* Retry: the LPASS CB device may not have been
+			 * ready in audio ION when this driver probed.
+			 */
+			if (!strcmp("adsp", smmu->subsys)) {
+				rc = msm_audio_ion_get_smmu_info(&smmu->cb_dev,
+						&smmu->sid);
+				if (rc) {
+					pr_err("%s: msm_audio_ion_get_smmu_info failed, rc = %d\n",
+						__func__, rc);
+					goto err;
+				}
+			} else
+				return -ENODEV;
+		}
+
+		smmu->pa = dma_map_single(smmu->cb_dev, mem->va,
+					  mem->size, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(smmu->cb_dev, smmu->pa)) {
+			rc = -ENOMEM;
+			pr_err("%s: failed to map single, rc = %d\n",
+				__func__, rc);
+			goto err;
+		}
+		smmu->pa_len = mem->size;
+
+		/* Append the SMMU SID information to the IOVA address */
+		if (smmu->sid)
+			smmu->pa |= smmu->sid;
+	} else {
+		smmu->pa = mem->dma_addr;
+		smmu->pa_len = mem->size;
+	}
+	pr_err("%s: pa=%pa, pa_len=%zd\n", __func__,
+		&smmu->pa, smmu->pa_len);
+
+	smmu->device_status |= MSM_MDF_MEM_MAPPED;
+
+	return 0;
+err:
+	return rc;
+}
+
+static int msm_mdf_alloc_dma_buf(struct msm_mdf_mem *mem)
+{
+	int rc = 0;
+
+	if (!mem)
+		return -EINVAL;
+
+	if (mem->device_status & MSM_MDF_MEM_ALLOCATED)
+		return 0;
+
+	if (mem->dev == NULL) {
+		pr_err("%s: device is not initialized\n",
+		__func__);
+		return -ENODEV;
+	}
+
+	mem->va = dma_alloc_coherent(mem->dev, mem->size,
+			&mem->dma_addr, GFP_KERNEL);
+	if (IS_ERR_OR_NULL(mem->va)) {
+		pr_err("%s: failed to allocate dma memory, rc = %d\n",
+			__func__, rc);
+		return -ENOMEM;
+	}
+	mem->va = phys_to_virt(mem->dma_addr);
+	mem->device_status |= MSM_MDF_MEM_ALLOCATED;
+	return rc;
+}
+
+static int msm_mdf_free_dma_buf(struct msm_mdf_mem *mem)
+{
+	if (!mem)
+		return -EINVAL;
+
+	if (mem->dev == NULL) {
+		pr_err("%s: device is not initialized\n",
+		__func__);
+		return -ENODEV;
+	}
+
+	//dma_free_coherent(mem->dev, mem->size, mem->va,
+	//				  mem->dma_addr);
+
+	mem->device_status &= ~MSM_MDF_MEM_ALLOCATED;
+	return 0;
+}
+
+static int msm_mdf_dma_buf_unmap(struct msm_mdf_mem *mem,
+				 struct msm_mdf_smmu *smmu)
+{
+	if (!smmu)
+		return -EINVAL;
+
+	if (smmu->enabled) {
+		if (smmu->cb_dev == NULL) {
+			pr_err("%s: cb device is not initialized\n",
+				__func__);
+			return -ENODEV;
+		}
+		//if (smmu->pa && mem->size)
+			//dma_unmap_single(smmu->cb_dev, smmu->pa,
+			//		 mem->size, DMA_BIDIRECTIONAL);
+	}
+
+	smmu->device_status &= ~MSM_MDF_MEM_MAPPED;
+
+	return 0;
+}
+
+static int msm_mdf_map_memory_to_subsys(struct msm_mdf_mem *mem,
+				struct msm_mdf_smmu *smmu)
+{
+	int rc = 0;
+
+	if (!mem || !smmu)
+		return -EINVAL;
+
+	/* Map mdf shared memory to ADSP */
+	if (!strcmp("adsp", smmu->subsys)) {
+		rc = q6core_map_memory_regions((phys_addr_t *)&smmu->pa,
+				ADSP_MEMORY_MAP_MDF_SHMEM_4K_POOL,
+				(uint32_t *)&smmu->pa_len, 1, &mem->map_handle);
+		if (rc)  {
+			pr_err("%s: q6core_map_memory_regions failed, rc = %d\n",
+				__func__, rc);
+		}
+	} else {
+		if (mem->map_handle) {
+			/* Map mdf shared memory to remote DSPs */
+			rc = q6core_map_mdf_shared_memory(mem->map_handle,
+					(phys_addr_t *)&smmu->pa, smmu->proc_id,
+					(uint32_t *)&smmu->pa_len, 1);
+			if (rc)  {
+				pr_err("%s: q6core_map_mdf_shared_memory failed, rc = %d\n",
+					__func__, rc);
+			}
+		}
+	}
+	return rc;
+}
+
+static void msm_mdf_unmap_memory_to_subsys(struct msm_mdf_mem *mem,
+				struct msm_mdf_smmu *smmu)
+{
+	if (!mem || !smmu)
+		return;
+
+	if (!strcmp("adsp", smmu->subsys)) {
+		if (mem->map_handle)
+			q6core_memory_unmap_regions(mem->map_handle);
+	}
+}
+
+/**
+ * msm_mdf_mem_init - Initialize the MDF memory pool and
+ * map the memory to subsystems
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+
+int msm_mdf_mem_init(void)
+{
+	int rc = 0, i, j;
+	struct msm_mdf_mem *mem = &mdf_mem_data;
+	struct msm_mdf_smmu *smmu;
+	unsigned long timeout = jiffies +
+		msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
+	int adsp_ready = 0;
+
+	if (!(mdf_mem_data.device_status & MSM_MDF_PROBED))
+		return -ENODEV;
+
+	if (mdf_mem_data.device_status & MSM_MDF_INITIALIZED)
+		return 0;
+
+	/* TODO: polling may not be needed as Q6 Core state should be
+	 * checked during machine driver probing.
+	 */
+	do {
+		if (!q6core_is_adsp_ready()) {
+			pr_err("%s: ADSP Audio NOT Ready\n",
+				__func__);
+			/* ADSP will be coming up after subsystem restart and
+			 * it might not be fully up when the control reaches
+			 * here. So, wait for 50msec before checking ADSP state
+			 */
+			msleep(50);
+		} else {
+			pr_debug("%s: ADSP Audio Ready\n",
+					__func__);
+			adsp_ready = 1;
+			break;
+		}
+	} while (time_after(timeout, jiffies));
+
+	if (!adsp_ready) {
+		pr_err("%s: timed out waiting for ADSP Audio\n",
+			__func__);
+		return -ETIMEDOUT;
+	}
+
+	if (mem->device_status & MSM_MDF_MEM_ALLOCATED) {
+		for (i = 0; i < SUBSYS_MAX; i++) {
+			smmu = &mdf_smmu_data[i];
+			rc = msm_mdf_dma_buf_map(mem, smmu);
+			if (rc) {
+				pr_err("%s: msm_mdf_dma_buf_map failed, rc = %d\n",
+					__func__, rc);
+				goto err;
+			}
+		}
+
+		for (j = 0; j < SUBSYS_MAX; j++) {
+			smmu = &mdf_smmu_data[j];
+			rc = msm_mdf_map_memory_to_subsys(mem, smmu);
+			if (rc) {
+				pr_err("%s: msm_mdf_map_memory_to_subsys failed\n",
+					__func__);
+				goto err;
+			}
+		}
+
+		mdf_mem_data.device_status |= MSM_MDF_INITIALIZED;
+	}
+	return 0;
+err:
+	return rc;
+}
+EXPORT_SYMBOL(msm_mdf_mem_init);
+
+int msm_mdf_mem_deinit(void)
+{
+	int rc = 0, i;
+	struct msm_mdf_mem *mem = &mdf_mem_data;
+	struct msm_mdf_smmu *smmu;
+
+	if (!(mdf_mem_data.device_status & MSM_MDF_INITIALIZED))
+		return -ENODEV;
+
+	for (i = SUBSYS_MAX - 1; i >= 0; i--) {
+		smmu = &mdf_smmu_data[i];
+		msm_mdf_unmap_memory_to_subsys(mem, smmu);
+	}
+
+	if (!rc) {
+		for (i = SUBSYS_MAX - 1; i >= 0; i--) {
+			smmu = &mdf_smmu_data[i];
+			msm_mdf_dma_buf_unmap(mem, smmu);
+		}
+
+		msm_mdf_free_dma_buf(mem);
+		mem->device_status &= ~MSM_MDF_MEM_ALLOCATED;
+	}
+
+	mdf_mem_data.device_status &= ~MSM_MDF_INITIALIZED;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_mdf_mem_deinit);
+
+static int msm_mdf_restart_notifier_cb(struct notifier_block *this,
+				unsigned long code,
+				void *_cmd)
+{
+	static int boot_count = 3;
+
+	/* During LPASS boot, HLOS receives events:
+	 *  SUBSYS_BEFORE_POWERUP
+	 *  SUBSYS_PROXY_VOTE
+	 *  SUBSYS_AFTER_POWERUP - needs to be skipped
+	 *  SUBSYS_PROXY_UNVOTE
+	 */
+	if (boot_count) {
+		boot_count--;
+		return NOTIFY_OK;
+	}
+
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		pr_debug("Subsys Notify: Shutdown Started\n");
+		/* Unmap and free memory upon restart event. */
+		msm_mdf_mem_deinit();
+		break;
+	case SUBSYS_AFTER_SHUTDOWN:
+		pr_debug("Subsys Notify: Shutdown Completed\n");
+		break;
+	case SUBSYS_BEFORE_POWERUP:
+		pr_debug("Subsys Notify: Bootup Started\n");
+		break;
+	case SUBSYS_AFTER_POWERUP:
+		pr_debug("Subsys Notify: Bootup Completed\n");
+		/* Allocate and map memory after restart complete. */
+		if (msm_mdf_mem_init())
+			pr_err("msm_mdf_mem_init failed\n");
+		break;
+	default:
+		pr_err("Subsys Notify: General: %lu\n", code);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static const struct of_device_id msm_mdf_match_table[] = {
+	{ .compatible = "qcom,msm-mdf", },
+	{ .compatible = "qcom,msm-mdf-mem-region", },
+	{ .compatible = "qcom,msm-mdf-cb", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_mdf_match_table);
+
+static int msm_mdf_cb_probe(struct device *dev)
+{
+	struct msm_mdf_smmu *smmu;
+	const char *subsys;
+	int rc = 0, i;
+
+	subsys = of_get_property(dev->of_node, "label", NULL);
+	if (!subsys) {
+		dev_err(dev, "%s: could not get label\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < SUBSYS_MAX; i++) {
+		if (!mdf_smmu_data[i].subsys)
+			continue;
+		if (!strcmp(subsys, mdf_smmu_data[i].subsys))
+			break;
+	}
+	if (i >= SUBSYS_MAX) {
+		dev_err(dev, "%s: subsys %s not supported\n",
+			__func__, subsys);
+		return -EINVAL;
+	}
+
+	smmu = &mdf_smmu_data[i];
+
+	smmu->enabled = of_property_read_bool(dev->of_node,
+						"qcom,smmu-enabled");
+
+	dev_info(dev, "%s: SMMU is %s for %s\n", __func__,
+		(smmu->enabled) ? "enabled" : "disabled",
+		smmu->subsys);
+
+	if (smmu->enabled) {
+		if (!strcmp("adsp", smmu->subsys)) {
+			/* Get SMMU info from audio ION */
+			rc = msm_audio_ion_get_smmu_info(&smmu->cb_dev,
+					&smmu->sid);
+			if (rc) {
+				dev_err(dev, "%s: msm_audio_ion_get_smmu_info failed, rc = %d\n",
+					__func__, rc);
+				goto err;
+			}
+		}
+	} else {
+		/* Setup SMMU CB if enabled for subsys other than ADSP */
+	}
+	return 0;
+err:
+	return rc;
+}
+
+static int msm_mdf_remove(struct platform_device *pdev)
+{
+	int rc = 0, i;
+
+	for (i = 0; i < SUBSYS_MAX; i++) {
+		if (!IS_ERR_OR_NULL(mdf_smmu_data[i].cb_dev))
+			arm_iommu_detach_device(mdf_smmu_data[i].cb_dev);
+		if (!IS_ERR_OR_NULL(mdf_smmu_data[i].mapping))
+			arm_iommu_release_mapping(mdf_smmu_data[i].mapping);
+		mdf_smmu_data[i].enabled = 0;
+	}
+	mdf_mem_data.device_status = 0;
+
+	return rc;
+}
+
+static int msm_mdf_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	enum apr_subsys_state q6_state;
+	struct device *dev = &pdev->dev;
+	uint32_t mdf_mem_data_size = 0;
+
+	/* TODO: MDF probing should have no dependency
+	 * on ADSP Q6 state.
+	 */
+	q6_state = apr_get_q6_state();
+	if (q6_state == APR_SUBSYS_DOWN) {
+		dev_dbg(dev, "deferring %s, adsp_state %d\n",
+			__func__, q6_state);
+		rc = -EPROBE_DEFER;
+		goto err;
+	} else
+		dev_dbg(dev, "%s: adsp is ready\n", __func__);
+
+	if (of_device_is_compatible(dev->of_node,
+					"qcom,msm-mdf-cb"))
+		return msm_mdf_cb_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node,
+					"qcom,msm-mdf-mem-region")) {
+		mdf_mem_data.dev = dev;
+
+		rc = of_property_read_u32(dev->of_node,
+				    "qcom,msm-mdf-mem-data-size",
+				    &mdf_mem_data_size);
+		if (rc) {
+			dev_dbg(&pdev->dev, "MDF mem data size entry not found\n");
+			goto err;
+		}
+
+		mdf_mem_data.size = mdf_mem_data_size;
+		dev_info(dev, "%s: mem region size %zd\n",
+			__func__, mdf_mem_data.size);
+		msm_mdf_alloc_dma_buf(&mdf_mem_data);
+		return 0;
+	}
+
+	rc = of_platform_populate(pdev->dev.of_node,
+					msm_mdf_match_table,
+					NULL, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to populate child nodes",
+			__func__);
+		goto err;
+	}
+	mdf_mem_data.device_status |= MSM_MDF_PROBED;
+
+err:
+	return rc;
+}
+
+static struct platform_driver msm_mdf_driver = {
+	.probe = msm_mdf_probe,
+	.remove = msm_mdf_remove,
+	.driver = {
+		.name = "msm-mdf",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_mdf_match_table,
+	},
+};
+
+static struct notifier_block nb = {
+	.priority = 0,
+	.notifier_call = msm_mdf_restart_notifier_cb,
+};
+
+int __init msm_mdf_init(void)
+{
+	/* Only need to monitor SSR from ADSP, which
+	 * is the master DSP managing MDF memory.
+	 */
+	ssr_handle = subsys_notif_register_notifier("adsp", &nb);
+	return platform_driver_register(&msm_mdf_driver);
+}
+
+void __exit msm_mdf_exit(void)
+{
+	platform_driver_unregister(&msm_mdf_driver);
+
+	if (ssr_handle)
+		subsys_notif_unregister_notifier(ssr_handle, &nb);
+}
+
+MODULE_DESCRIPTION("MSM MDF Module");
+MODULE_LICENSE("GPL v2");

+ 2 - 0
dsp/q6_init.c

@@ -30,11 +30,13 @@ static int __init audio_q6_init(void)
 	msm_audio_ion_init();
 	audio_slimslave_init();
 	avtimer_init();
+	msm_mdf_init();
 	return 0;
 }
 
 static void __exit audio_q6_exit(void)
 {
+	msm_mdf_exit();
 	avtimer_exit();
 	audio_slimslave_exit();
 	msm_audio_ion_exit();

+ 14 - 0
dsp/q6_init.h

@@ -26,6 +26,20 @@ int rtac_init(void);
 int msm_audio_ion_init(void);
 int audio_slimslave_init(void);
 int avtimer_init(void);
+#ifdef CONFIG_MSM_MDF
+int msm_mdf_init(void);
+void msm_mdf_exit(void);
+#else
+static inline int msm_mdf_init(void)
+{
+	return 0;
+}
+
+static inline void msm_mdf_exit(void)
+{
+	return;
+}
+#endif
 
 void avtimer_exit(void);
 void audio_slimslave_exit(void);

+ 132 - 6
dsp/q6core.c

@@ -39,6 +39,8 @@
 
 #define ADSP_STATE_READY_TIMEOUT_MS 3000
 
+#define APR_ENOTREADY 10
+
 enum {
 	META_CAL,
 	CUST_TOP_CAL,
@@ -274,12 +276,24 @@ static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
 		case AVCS_CMD_SHARED_MEM_UNMAP_REGIONS:
 			pr_debug("%s: Cmd = AVCS_CMD_SHARED_MEM_UNMAP_REGIONS status[0x%x]\n",
 				__func__, payload1[1]);
+			/* Negate the ADSP status to match Linux error codes */
+			q6core_lcl.adsp_status = -payload1[1];
 			q6core_lcl.bus_bw_resp_received = 1;
 			wake_up(&q6core_lcl.bus_bw_req_wait);
 			break;
 		case AVCS_CMD_SHARED_MEM_MAP_REGIONS:
 			pr_debug("%s: Cmd = AVCS_CMD_SHARED_MEM_MAP_REGIONS status[0x%x]\n",
 				__func__, payload1[1]);
+			/* Negate the ADSP status to match Linux error codes */
+			q6core_lcl.adsp_status = -payload1[1];
+			q6core_lcl.bus_bw_resp_received = 1;
+			wake_up(&q6core_lcl.bus_bw_req_wait);
+			break;
+		case AVCS_CMD_MAP_MDF_SHARED_MEMORY:
+			pr_debug("%s: Cmd = AVCS_CMD_MAP_MDF_SHARED_MEMORY status[0x%x]\n",
+				__func__, payload1[1]);
+			/* Negate the ADSP status to match Linux error codes */
+			q6core_lcl.adsp_status = -payload1[1];
 			q6core_lcl.bus_bw_resp_received = 1;
 			wake_up(&q6core_lcl.bus_bw_req_wait);
 			break;
@@ -923,7 +937,7 @@ bail:
 }
 EXPORT_SYMBOL(q6core_is_adsp_ready);
 
-static int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
+int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
 			uint32_t *bufsz, uint32_t bufcnt, uint32_t *map_handle)
 {
 	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
@@ -951,7 +965,7 @@ static int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
 	mmap_regions->hdr.dest_port = 0;
 	mmap_regions->hdr.token = 0;
 	mmap_regions->hdr.opcode = AVCS_CMD_SHARED_MEM_MAP_REGIONS;
-	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
+	mmap_regions->mem_pool_id = mempool_id & 0x00ff;
 	mmap_regions->num_regions = bufcnt & 0x00ff;
 	mmap_regions->property_flag = 0x00;
 
@@ -971,6 +985,7 @@ static int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
 		__func__, buf_add, bufsz[0], mmap_regions->num_regions);
 
 	*map_handle = 0;
+	q6core_lcl.adsp_status = 0;
 	q6core_lcl.bus_bw_resp_received = 0;
 	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
 		mmap_regions);
@@ -988,6 +1003,16 @@ static int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
 		pr_err("%s: timeout. waited for memory map\n", __func__);
 		ret = -ETIME;
 		goto done;
+	} else {
+		/* set ret to 0 as no timeout happened */
+		ret = 0;
+	}
+
+	if (q6core_lcl.adsp_status < 0) {
+		pr_err("%s: DSP returned error %d\n",
+			__func__, q6core_lcl.adsp_status);
+		ret = q6core_lcl.adsp_status;
+		goto done;
 	}
 
 	*map_handle = q6core_lcl.mem_map_cal_handle;
@@ -996,7 +1021,7 @@ done:
 	return ret;
 }
 
-static int q6core_memory_unmap_regions(uint32_t mem_map_handle)
+int q6core_memory_unmap_regions(uint32_t mem_map_handle)
 {
 	struct avs_cmd_shared_mem_unmap_regions unmap_regions;
 	int ret = 0;
@@ -1015,6 +1040,7 @@ static int q6core_memory_unmap_regions(uint32_t mem_map_handle)
 	unmap_regions.hdr.opcode = AVCS_CMD_SHARED_MEM_UNMAP_REGIONS;
 	unmap_regions.mem_map_handle = mem_map_handle;
 
+	q6core_lcl.adsp_status = 0;
 	q6core_lcl.bus_bw_resp_received = 0;
 
 	pr_debug("%s: unmap regions map handle %d\n",
@@ -1037,11 +1063,110 @@ static int q6core_memory_unmap_regions(uint32_t mem_map_handle)
 		       __func__);
 		ret = -ETIME;
 		goto done;
+	} else {
+		/* set ret to 0 as no timeout happened */
+		ret = 0;
+	}
+	if (q6core_lcl.adsp_status < 0) {
+		pr_err("%s: DSP returned error %d\n",
+			__func__, q6core_lcl.adsp_status);
+		ret = q6core_lcl.adsp_status;
+		goto done;
 	}
 done:
 	return ret;
 }
 
+
+int q6core_map_mdf_shared_memory(uint32_t map_handle, phys_addr_t *buf_add,
+			uint32_t proc_id, uint32_t *bufsz, uint32_t bufcnt)
+{
+	struct avs_cmd_map_mdf_shared_memory *mmap_regions = NULL;
+	struct avs_shared_map_region_payload *mregions = NULL;
+	void *mmap_region_cmd = NULL;
+	void *payload = NULL;
+	int ret = 0;
+	int i = 0;
+	int cmd_size = 0;
+
+	cmd_size = sizeof(struct avs_cmd_map_mdf_shared_memory)
+			+ sizeof(struct avs_shared_map_region_payload)
+			* bufcnt;
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (mmap_region_cmd == NULL)
+		return -ENOMEM;
+
+	mmap_regions = (struct avs_cmd_map_mdf_shared_memory *)mmap_region_cmd;
+	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						APR_HDR_LEN(APR_HDR_SIZE),
+								APR_PKT_VER);
+	mmap_regions->hdr.pkt_size = cmd_size;
+	mmap_regions->hdr.src_port = 0;
+	mmap_regions->hdr.dest_port = 0;
+	mmap_regions->hdr.token = 0;
+	mmap_regions->hdr.opcode = AVCS_CMD_MAP_MDF_SHARED_MEMORY;
+	mmap_regions->mem_map_handle = map_handle;
+	mmap_regions->proc_id = proc_id & 0x00ff;
+	mmap_regions->num_regions = bufcnt & 0x00ff;
+
+	payload = ((u8 *) mmap_region_cmd +
+				sizeof(struct avs_cmd_map_mdf_shared_memory));
+	mregions = (struct avs_shared_map_region_payload *)payload;
+
+	for (i = 0; i < bufcnt; i++) {
+		mregions->shm_addr_lsw = lower_32_bits(buf_add[i]);
+		mregions->shm_addr_msw =
+				msm_audio_populate_upper_32_bits(buf_add[i]);
+		mregions->mem_size_bytes = bufsz[i];
+		++mregions;
+	}
+
+	pr_debug("%s: sending mdf memory map, addr %pa, size %d, bufcnt = %d\n",
+		__func__, buf_add, bufsz[0], mmap_regions->num_regions);
+
+	q6core_lcl.adsp_status = 0;
+	q6core_lcl.bus_bw_resp_received = 0;
+	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
+		mmap_regions);
+	if (ret < 0) {
+		pr_err("%s: mdf memory map failed %d\n",
+			__func__, ret);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
+				(q6core_lcl.bus_bw_resp_received == 1),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: timeout. waited for mdf memory map\n",
+			__func__);
+		ret = -ETIME;
+		goto done;
+	} else {
+		/* set ret to 0 as no timeout happened */
+		ret = 0;
+	}
+
+	/*
+	 * When the remote DSP is not ready, the ADSP will validate and store
+	 * the memory information and return APR_ENOTREADY to HLOS. The ADSP
+	 * will map the memory with remote DSP when it is ready. HLOS should
+	 * not treat APR_ENOTREADY as an error.
+	 */
+	if (q6core_lcl.adsp_status < 0 && q6core_lcl.adsp_status != -APR_ENOTREADY) {
+		pr_err("%s: DSP returned error %d\n",
+			__func__, q6core_lcl.adsp_status);
+		ret = q6core_lcl.adsp_status;
+		goto done;
+	}
+
+done:
+	kfree(mmap_region_cmd);
+	return ret;
+}
+
 static int q6core_dereg_all_custom_topologies(void)
 {
 	int ret = 0;
@@ -1119,10 +1244,11 @@ static int q6core_send_custom_topologies(void)
 
 	q6core_dereg_all_custom_topologies();
 
-	ret = q6core_map_memory_regions(&cal_block->cal_data.paddr, 0,
+	ret = q6core_map_memory_regions(&cal_block->cal_data.paddr,
+		ADSP_MEMORY_MAP_SHMEM8_4K_POOL,
 		(uint32_t *)&cal_block->map_data.map_size, 1,
 		&cal_block->map_data.q6map_handle);
-	if (!ret)  {
+	if (ret)  {
 		pr_err("%s: q6core_map_memory_regions failed\n", __func__);
 		goto unlock;
 	}
@@ -1172,7 +1298,7 @@ static int q6core_send_custom_topologies(void)
 		ret = q6core_lcl.adsp_status;
 unmap:
 	ret2 = q6core_memory_unmap_regions(cal_block->map_data.q6map_handle);
-	if (!ret2)  {
+	if (ret2)  {
 		pr_err("%s: q6core_memory_unmap_regions failed for map handle %d\n",
 			__func__, cal_block->map_data.q6map_handle);
 		ret = ret2;

+ 48 - 0
include/dsp/apr_audio-v2.h

@@ -4521,6 +4521,7 @@ struct afe_param_id_lpass_core_shared_clk_cfg {
 #define ADSP_MEMORY_MAP_SMI_POOL      1
 #define ADSP_MEMORY_MAP_IMEM_POOL      2
 #define ADSP_MEMORY_MAP_SHMEM8_4K_POOL      3
+#define ADSP_MEMORY_MAP_MDF_SHMEM_4K_POOL   4
 
 /* Definition of virtual memory flag */
 #define ADSP_MEMORY_MAP_VIRTUAL_MEMORY 1
@@ -4645,6 +4646,53 @@ struct avs_cmdrsp_shared_mem_map_regions {
 
 } __packed;
 
+#define AVS_MDF_MDSP_PROC_ID	 0x2
+#define AVS_MDF_SSC_PROC_ID      0x3
+#define AVS_MDF_CDSP_PROC_ID	 0x4
+
+/* Shared memory map command payload used by the
+ * #AVCS_CMD_MAP_MDF_SHARED_MEMORY.
+ *
+ * This structure allows clients to map multiple shared memory
+ * regions to a remote processor ID. All mapped regions must be
+ * from the same memory pool. This structure is followed by
+ * num_regions instances of avs_shared_map_region_payload.
+ */
+struct avs_cmd_map_mdf_shared_memory {
+	struct apr_hdr hdr;
+	uint32_t mem_map_handle;
+/* Unique identifier for the shared memory address.
+ *
+ * The aDSP returns this handle for
+ * #AVCS_CMD_SHARED_MEM_MAP_REGIONS
+ *
+ * Supported values:
+ * Any 32-bit value
+ *
+ * The aDSP uses this handle to retrieve the shared memory
+ * attributes. This handle can be an abstract representation
+ * of the shared memory regions that are being mapped.
+ */
+
+	uint32_t proc_id;
+/* Supported values:
+ * #AVS_MDF_MDSP_PROC_ID
+ * #AVS_MDF_SSC_PROC_ID
+ * #AVS_MDF_CDSP_PROC_ID
+ */
+
+	uint32_t num_regions;
+/* Number of regions to be mapped to the remote DSP processor
+ * specified by the proc_id field.
+ *
+ * An array of avs_shared_map_region_payload structures follows.
+ * The address fields in that array must correspond to the remote
+ * processor specified by proc_id.
+ * For DSPs with an SMMU enabled, the addresses must be IOVAs;
+ * for DSPs without an SMMU, they must be physical addresses.
+ */
+} __packed;
+
 /*adsp_audio_memmap_api.h*/
 
 /* ASM related data structures */

+ 1 - 0
include/dsp/msm_audio_ion.h

@@ -33,4 +33,5 @@ int msm_audio_ion_mmap(struct audio_buffer *abuff, struct vm_area_struct *vma);
 int msm_audio_ion_cache_operations(struct audio_buffer *abuff, int cache_op);
 
 u32 msm_audio_populate_upper_32_bits(dma_addr_t pa);
+int msm_audio_ion_get_smmu_info(struct device **cb_dev, u64 *smmu_sid);
 #endif /* _LINUX_MSM_AUDIO_ION_H */

+ 44 - 0
include/dsp/msm_mdf.h

@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_MSM_MDF_H
+#define _LINUX_MSM_MDF_H
+
+#ifdef CONFIG_MSM_MDF
+
+/**
+ *  msm_mdf_mem_init - allocate memory and map it to the ADSP, to be
+ *                     shared across multiple remote DSPs.
+ */
+int msm_mdf_mem_init(void);
+
+/**
+ *  msm_mdf_mem_deinit - unmap the memory from the ADSP and free it.
+ */
+int msm_mdf_mem_deinit(void);
+
+#else
+
+static inline int msm_mdf_mem_init(void)
+{
+	return 0;
+}
+
+static inline int msm_mdf_mem_deinit(void)
+{
+	return 0;
+}
+
+#endif /* CONFIG_MSM_MDF */
+
+#endif /* _LINUX_MSM_MDF_H */

+ 20 - 1
include/dsp/q6core.h

@@ -110,6 +110,20 @@ struct avcs_cmdrsp_get_license_validation_result {
 #define AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS                          0x00012925
 #define AVCS_CMD_SHARED_MEM_UNMAP_REGIONS                           0x00012926
 
+/* Commands the AVCS to map multiple shared memory regions to a remote
+ * processor ID. All mapped regions must be from the same memory pool.
+ *
+ * Return:
+ * ADSP_EOK        : SUCCESS
+ * ADSP_EHANDLE    : Failed due to incorrect handle.
+ * ADSP_EBADPARAM  : Failed due to bad parameters.
+ *
+ * Dependencies:
+ * The mem_map_handle should be obtained earlier
+ * using AVCS_CMD_SHARED_MEM_MAP_REGIONS with pool ID
+ * ADSP_MEMORY_MAP_MDF_SHMEM_4K_POOL.
+ */
+#define AVCS_CMD_MAP_MDF_SHARED_MEMORY                              0x00012930
 
 #define AVCS_CMD_REGISTER_TOPOLOGIES                                0x00012923
 
@@ -183,10 +197,15 @@ struct avcs_cmd_load_unload_topo_modules {
 } __packed;
 
 
+int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
+			uint32_t *bufsz, uint32_t bufcnt, uint32_t *map_handle);
+int q6core_memory_unmap_regions(uint32_t mem_map_handle);
+int q6core_map_mdf_shared_memory(uint32_t map_handle, phys_addr_t *buf_add,
+			uint32_t proc_id, uint32_t *bufsz, uint32_t bufcnt);
+
 int32_t core_set_license(uint32_t key, uint32_t module_id);
 int32_t core_get_license_status(uint32_t module_id);
 
 int32_t q6core_load_unload_topo_modules(uint32_t topology_id,
 			bool preload_type);
-
 #endif /* __Q6CORE_H__ */
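
Taken together, the newly exported q6core helpers define a two-step MDF mapping sequence: first map the buffer into the ADSP's MDF 4K pool, then hand the resulting handle to each remote DSP. A hedged sketch of that call order follows (illustrative only; buffer allocation, per-DSP IOVAs and SSR handling are omitted, and the function name is made up for this example):

#include <dsp/apr_audio-v2.h>
#include <dsp/q6core.h>

/* Illustrative sequence only: @pa/@len describe a buffer the caller has
 * already allocated and mapped for the ADSP. In the real driver each
 * remote DSP is given its own IOVA; a single address is reused here
 * for brevity.
 */
static int example_mdf_map_sequence(phys_addr_t pa, uint32_t len)
{
	uint32_t map_handle = 0;
	int rc;

	/* Step 1: map the region into the ADSP's MDF shared-memory pool. */
	rc = q6core_map_memory_regions(&pa, ADSP_MEMORY_MAP_MDF_SHMEM_4K_POOL,
				       &len, 1, &map_handle);
	if (rc)
		return rc;

	/* Step 2: share the same handle with a remote DSP; the ADSP may
	 * answer APR_ENOTREADY and finish the mapping once that DSP is up.
	 */
	rc = q6core_map_mdf_shared_memory(map_handle, &pa,
					  AVS_MDF_CDSP_PROC_ID, &len, 1);
	if (rc)
		q6core_memory_unmap_regions(map_handle);

	return rc;
}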