
The current derivation of pfns/page structures has a fair amount of complexity in it, and can be simplified. For example, there need not be completely separate cases for pages that are all remapped into the vmalloc space but were allocated differently (i.e. atomic allocations vs contiguous allocations). So, simplify the handling for those, and leverage iommu_dma_mmap() and iommu_dma_get_sgtable(), as there is nothing fastmap-specific about the way pages are mapped into userspace or about how we construct an sg-table, respectively. Change-Id: Ia5725a93ee84f2c5e7ccc1d6e62f828338e83bd7 Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
99 lines
2.6 KiB
C
99 lines
2.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* Copyright (C) 2014-2015 ARM Ltd.
|
|
*/
|
|
#ifndef __DMA_IOMMU_H
|
|
#define __DMA_IOMMU_H
|
|
|
|
#include <linux/errno.h>
|
|
#include <linux/types.h>
|
|
|
|
#ifdef CONFIG_IOMMU_DMA
|
|
#include <linux/dma-mapping.h>
|
|
#include <linux/iommu.h>
|
|
#include <linux/msi.h>
|
|
|
|
struct iova_domain;
|
|
|
|
/* Domain management interface for IOMMU drivers */
|
|
int iommu_get_dma_cookie(struct iommu_domain *domain);
|
|
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
|
|
void iommu_put_dma_cookie(struct iommu_domain *domain);
|
|
|
|
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
|
|
unsigned long attrs);
|
|
size_t iommu_dma_prepare_map_sg(struct device *dev, struct iova_domain *iovad,
|
|
struct scatterlist *sg, int nents);
|
|
int iommu_dma_finalise_sg(struct device *dev, struct scatterlist *sg,
|
|
int nents, dma_addr_t dma_addr);
|
|
void iommu_dma_invalidate_sg(struct scatterlist *sg, int nents);
|
|
int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
|
|
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
|
unsigned long attrs);
|
|
int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
|
|
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
|
unsigned long attrs);
|
|
|
|
/* Setup call for arch DMA mapping code */
|
|
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
|
|
|
|
/* The DMA API isn't _quite_ the whole story, though... */
|
|
/*
|
|
* iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device
|
|
*
|
|
* The MSI page will be stored in @desc.
|
|
*
|
|
* Return: 0 on success otherwise an error describing the failure.
|
|
*/
|
|
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
|
|
|
|
/* Update the MSI message if required. */
|
|
void iommu_dma_compose_msi_msg(struct msi_desc *desc,
|
|
struct msi_msg *msg);
|
|
|
|
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
|
|
|
|
#else /* CONFIG_IOMMU_DMA */
|
|
|
|
struct iommu_domain;
|
|
struct msi_desc;
|
|
struct msi_msg;
|
|
struct device;
|
|
|
|
/*
 * Stub for !CONFIG_IOMMU_DMA: there are no IOMMU-backed DMA ops to
 * install, so arch DMA setup code gets a no-op.
 */
static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size)
{
}
|
|
|
|
/*
 * Stub for !CONFIG_IOMMU_DMA: DMA cookies cannot be allocated without
 * the IOMMU-DMA layer, so report the facility as unavailable.
 */
static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	return -ENODEV;
}
|
|
|
|
/*
 * Stub for !CONFIG_IOMMU_DMA: MSI cookies likewise depend on the
 * IOMMU-DMA layer, so signal that it is not present.
 */
static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}
|
|
|
|
/*
 * Stub for !CONFIG_IOMMU_DMA: no cookie can have been allocated
 * (iommu_get_dma_cookie() fails above), so there is nothing to free.
 */
static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}
|
|
|
|
/*
 * Stub for !CONFIG_IOMMU_DMA: returns success (0) rather than an error —
 * presumably so that interrupt setup proceeds normally when no IOMMU
 * translation of the MSI doorbell is needed. NOTE(review): confirm
 * against callers in the MSI irqchip code.
 */
static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
		phys_addr_t msi_addr)
{
	return 0;
}
|
|
|
|
/*
 * Stub for !CONFIG_IOMMU_DMA: with no IOMMU translation in play the MSI
 * message needs no address rewriting, so leave @msg untouched.
 */
static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc,
		struct msi_msg *msg)
{
}
|
|
|
|
/*
 * Stub for !CONFIG_IOMMU_DMA: contributes no reserved regions; @list is
 * left exactly as the caller passed it in.
 */
static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
}
|
|
|
|
#endif /* CONFIG_IOMMU_DMA */
|
|
#endif /* __DMA_IOMMU_H */
|