Merge tag 'iommu-updates-v3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel: "This time with: - Generic page-table framework for ARM IOMMUs using the LPAE page-table format, ARM-SMMU and Renesas IPMMU make use of it already. - Break out the IO virtual address allocator from the Intel IOMMU so that it can be used by other DMA-API implementations too. The first user will be the ARM64 common DMA-API implementation for IOMMUs - Device tree support for Renesas IPMMU - Various fixes and cleanups all over the place" * tag 'iommu-updates-v3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (36 commits) iommu/amd: Convert non-returned local variable to boolean when relevant iommu: Update my email address iommu/amd: Use wait_event in put_pasid_state_wait iommu/amd: Fix amd_iommu_free_device() iommu/arm-smmu: Avoid build warning iommu/fsl: Various cleanups iommu/fsl: Use %pa to print phys_addr_t iommu/omap: Print phys_addr_t using %pa iommu: Make more drivers depend on COMPILE_TEST iommu/ipmmu-vmsa: Fix IOMMU lookup when multiple IOMMUs are registered iommu: Disable on !MMU builds iommu/fsl: Remove unused fsl_of_pamu_ids[] iommu/fsl: Fix section mismatch iommu/ipmmu-vmsa: Use the ARM LPAE page table allocator iommu: Fix trace_map() to report original iova and original size iommu/arm-smmu: add support for iova_to_phys through ATS1PR iopoll: Introduce memory-mapped IO polling macros iommu/arm-smmu: don't touch the secure STLBIALL register iommu/arm-smmu: make use of generic LPAE allocator iommu: io-pgtable-arm: add non-secure quirk ...
This commit is contained in:
144
include/linux/iopoll.h
Normal file
144
include/linux/iopoll.h
Normal file
@@ -0,0 +1,144 @@
|
||||
/*
|
||||
* Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_IOPOLL_H
|
||||
#define _LINUX_IOPOLL_H
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
/**
 * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
 * @op: accessor function (takes @addr as its only argument)
 * @addr: Address to poll
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @sleep_us: Maximum time to sleep between reads in us (0
 *            tight-loops). Should be less than ~20ms since usleep_range
 *            is used (see Documentation/timers/timers-howto.txt).
 * @timeout_us: Timeout in us, 0 means never timeout
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last read value at @addr is stored in @val. Must not
 * be called from atomic context if sleep_us or timeout_us are used.
 *
 * When available, you'll probably want to use one of the specialized
 * macros defined below rather than this macro directly.
 */
#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us)	\
({ \
	ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
	might_sleep_if(sleep_us); \
	for (;;) { \
		(val) = op(addr); \
		if (cond) \
			break; \
		/* Deadline hit: re-read once so a last-moment success is not lost. */ \
		if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
			(val) = op(addr); \
			break; \
		} \
		if (sleep_us) \
			usleep_range((sleep_us >> 2) + 1, sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
|
||||
|
||||
/**
 * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
 * @op: accessor function (takes @addr as its only argument)
 * @addr: Address to poll
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
 *            be less than ~10us since udelay is used (see
 *            Documentation/timers/timers-howto.txt).
 * @timeout_us: Timeout in us, 0 means never timeout
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last read value at @addr is stored in @val.
 *
 * When available, you'll probably want to use one of the specialized
 * macros defined below rather than this macro directly.
 */
#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
({ \
	ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
	for (;;) { \
		(val) = op(addr); \
		if (cond) \
			break; \
		/* Deadline hit: re-read once so a last-moment success is not lost. */ \
		if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
			(val) = op(addr); \
			break; \
		} \
		if (delay_us) \
			udelay(delay_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
|
||||
|
||||
|
||||
/*
 * Convenience wrappers binding the generic pollers to the standard MMIO
 * accessors (readb/readw/readl/readq and their _relaxed variants).
 */
#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us)

#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)

#define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us)

#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)

#define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us)

#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)

#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us)

#define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us)

#define readb_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us)

#define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us)

#define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us)

#define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us)

#define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readl_relaxed, addr, val, cond, delay_us, timeout_us)

#define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us)

#define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us)

#define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us)
|
||||
|
||||
#endif /* _LINUX_IOPOLL_H */
|
@@ -16,9 +16,6 @@
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
/* IO virtual address start page frame number */
|
||||
#define IOVA_START_PFN (1)
|
||||
|
||||
/* iova structure */
|
||||
struct iova {
|
||||
struct rb_node node;
|
||||
@@ -31,6 +28,8 @@ struct iova_domain {
|
||||
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
|
||||
struct rb_root rbroot; /* iova domain rbtree root */
|
||||
struct rb_node *cached32_node; /* Save last alloced node */
|
||||
unsigned long granule; /* pfn granularity for this domain */
|
||||
unsigned long start_pfn; /* Lower limit for this domain */
|
||||
unsigned long dma_32bit_pfn;
|
||||
};
|
||||
|
||||
@@ -39,6 +38,39 @@ static inline unsigned long iova_size(struct iova *iova)
|
||||
return iova->pfn_hi - iova->pfn_lo + 1;
|
||||
}
|
||||
|
||||
static inline unsigned long iova_shift(struct iova_domain *iovad)
|
||||
{
|
||||
return __ffs(iovad->granule);
|
||||
}
|
||||
|
||||
static inline unsigned long iova_mask(struct iova_domain *iovad)
|
||||
{
|
||||
return iovad->granule - 1;
|
||||
}
|
||||
|
||||
/* Byte offset of @iova within its granule. */
static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova & iova_mask(iovad);
}
|
||||
|
||||
/* Round @size up to a whole number of granules. */
static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	return ALIGN(size, iovad->granule);
}
|
||||
|
||||
static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
|
||||
{
|
||||
return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
|
||||
}
|
||||
|
||||
/* PFN (in domain granules) containing the DMA address @iova. */
static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}
|
||||
|
||||
int iommu_iova_cache_init(void);
void iommu_iova_cache_destroy(void);

struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
|
||||
@@ -49,7 +81,8 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
|
||||
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
			  unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
/*
 * init_iova_domain() now takes the pfn granularity and the domain's lower
 * PFN bound explicitly (the old single pfn_32bit form is gone).
 */
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
		      unsigned long start_pfn, unsigned long pfn_32bit);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
||||
struct iova *split_and_remove_iova(struct iova_domain *iovad,
|
||||
|
@@ -1,24 +0,0 @@
|
||||
/*
 * IPMMU VMSA Platform Data
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#ifndef __IPMMU_VMSA_H__
#define __IPMMU_VMSA_H__

/* One bus master attached to the IPMMU: its name and micro-TLB index. */
struct ipmmu_vmsa_master {
	const char *name;
	unsigned int utlb;
};

/* Platform data: fixed table of masters handled by one IPMMU instance. */
struct ipmmu_vmsa_platform_data {
	const struct ipmmu_vmsa_master *masters;
	unsigned int num_masters;
};

#endif /* __IPMMU_VMSA_H__ */
|
Reference in New Issue
Block a user