Merge branch 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (59 commits)
  x86/gart: Do not select AGP for GART_IOMMU
  x86/amd-iommu: Initialize passthrough mode when requested
  x86/amd-iommu: Don't detach device from pt domain on driver unbind
  x86/amd-iommu: Make sure a device is assigned in passthrough mode
  x86/amd-iommu: Align locking between attach_device and detach_device
  x86/amd-iommu: Fix device table write order
  x86/amd-iommu: Add passthrough mode initialization functions
  x86/amd-iommu: Add core functions for pd allocation/freeing
  x86/dma: Mark iommu_pass_through as __read_mostly
  x86/amd-iommu: Change iommu_map_page to support multiple page sizes
  x86/amd-iommu: Support higher level PTEs in iommu_page_unmap
  x86/amd-iommu: Remove old page table handling macros
  x86/amd-iommu: Use 2-level page tables for dma_ops domains
  x86/amd-iommu: Remove bus_addr check in iommu_map_page
  x86/amd-iommu: Remove last usages of IOMMU_PTE_L0_INDEX
  x86/amd-iommu: Change alloc_pte to support 64 bit address space
  x86/amd-iommu: Introduce increase_address_space function
  x86/amd-iommu: Flush domains if address space size was increased
  x86/amd-iommu: Introduce set_dte_entry function
  x86/amd-iommu: Add a gneric version of amd_iommu_flush_all_devices
  ...
@@ -424,6 +424,29 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
#endif
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size))
		return 0;

	if (!dev->dma_mask)
		return 0;

	return addr + size <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_direct_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_direct_offset(dev);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
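For context, here is a minimal sketch (not part of this commit) of how a direct-mapping path might combine the dma_capable() and phys_to_dma() helpers added above; try_map_direct() is a hypothetical name used only for illustration.

/*
 * Illustrative sketch only, assuming the helpers introduced in the hunk
 * above. try_map_direct() is hypothetical; it is not a function from this
 * commit or from the kernel tree.
 */
static bool try_map_direct(struct device *dev, void *ptr, size_t size,
			   dma_addr_t *dma_out)
{
	phys_addr_t paddr = virt_to_phys(ptr);    /* CPU physical address     */
	dma_addr_t dma = phys_to_dma(dev, paddr); /* apply the bus/DMA offset */

	/* Unreachable by the device (or vetoed by addr_needs_map): bounce. */
	if (!dma_capable(dev, dma, size))
		return false;

	*dma_out = dma;
	return true;
}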
@@ -24,50 +24,12 @@
int swiotlb __read_mostly;
unsigned int ppc_swiotlb_enable;

void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr)
{
	unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr));
	void *pageaddr = page_address(pfn_to_page(pfn));

	if (pageaddr != NULL)
		return pageaddr + (addr % PAGE_SIZE);
	return NULL;
}

dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	return paddr + get_dma_direct_offset(hwdev);
}

phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)

{
	return baddr - get_dma_direct_offset(hwdev);
}

/*
 * Determine if an address needs bounce buffering via swiotlb.
 * Going forward I expect the swiotlb code to generalize on using
 * a dma_ops->addr_needs_map, and this function will move from here to the
 * generic swiotlb code.
 */
int
swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr,
				   size_t size)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);

	BUG_ON(!dma_ops);
	return dma_ops->addr_needs_map(hwdev, addr, size);
}

/*
 * Determine if an address is reachable by a pci device, or if we must bounce.
 */
static int
swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
{
	u64 mask = dma_get_mask(hwdev);
	dma_addr_t max;
	struct pci_controller *hose;
	struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -79,16 +41,9 @@ swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
	if ((addr + size > max) | (addr < hose->dma_window_base_cur))
		return 1;

	return !is_buffer_dma_capable(mask, addr, size);
	return 0;
}

static int
swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
{
	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}


/*
 * At the moment, all platforms that use this code only require
 * swiotlb to be used if we're operating on HIGHMEM. Since
@@ -104,7 +59,6 @@ struct dma_mapping_ops swiotlb_dma_ops = {
	.dma_supported = swiotlb_dma_supported,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.addr_needs_map = swiotlb_addr_needs_map,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
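The swiotlb_addr_needs_map() hook removed above performed the same DMA-mask test that the generic dma_capable() helper (added in the header hunk) now expresses directly, since is_buffer_dma_capable() simply checks whether addr + size fits under the mask. A minimal sketch of that check, with needs_bounce() as a hypothetical wrapper name used only to show the equivalence:

/*
 * Illustrative sketch only: the bounce decision the removed code made
 * through is_buffer_dma_capable(). needs_bounce() is a hypothetical name,
 * not a function from this commit.
 */
static int needs_bounce(struct device *hwdev, dma_addr_t addr, size_t size)
{
	/* Bounce through swiotlb when the buffer ends beyond the DMA mask. */
	return !(addr + size <= dma_get_mask(hwdev));
}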