sparc: Revert generic IOMMU allocator.
I applied the wrong version of this patch series, V4 instead of V10,
due to a patchwork bundling snafu.

Signed-off-by: David S. Miller <davem@davemloft.net>
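What the revert restores: the V4 series had replaced this driver's per-IOMMU "arena" (a flat bitmap of TSB entries scanned under a single spinlock) with the generic multi-pool allocator behind <linux/iommu-common.h>, where iommu_tbl_range_alloc() picks a pool via a per-CPU hash. The diff below puts the arena back. As a rough userspace model of the restored arena behavior, assuming a byte-per-entry map and a pthread mutex standing in for the kernel's bitmap and iommu->lock (all names illustrative, not kernel APIs):

    /* Rough userspace model of the arena-style allocator this commit
     * restores: one slot per IOMMU page, scanned linearly for a free
     * run under a single lock.  Illustrative only, not the kernel code. */
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    #define ARENA_LIMIT 1024UL          /* number of TSB entries (model) */
    #define ARENA_FAIL  (~0UL)          /* stands in for DMA_ERROR_CODE */

    struct arena {
        pthread_mutex_t lock;           /* models iommu->lock */
        unsigned char map[ARENA_LIMIT]; /* byte per entry, for clarity */
        unsigned long limit;
    };

    /* Find npages consecutive free entries; mark them and return the first. */
    static unsigned long arena_range_alloc(struct arena *a, unsigned long npages)
    {
        unsigned long i, run = 0, ret = ARENA_FAIL;

        pthread_mutex_lock(&a->lock);
        for (i = 0; i < a->limit; i++) {
            run = a->map[i] ? 0 : run + 1;  /* count the current free run */
            if (run == npages) {
                ret = i + 1 - npages;
                memset(&a->map[ret], 1, npages);  /* mark range allocated */
                break;
            }
        }
        pthread_mutex_unlock(&a->lock);
        return ret;
    }

    static void arena_range_free(struct arena *a, unsigned long entry,
                                 unsigned long npages)
    {
        pthread_mutex_lock(&a->lock);
        memset(&a->map[entry], 0, npages);  /* clear the range */
        pthread_mutex_unlock(&a->lock);
    }

    int main(void)
    {
        struct arena a = { .lock = PTHREAD_MUTEX_INITIALIZER,
                           .limit = ARENA_LIMIT };
        unsigned long e = arena_range_alloc(&a, 8);

        printf("allocated entry %lu\n", e);
        arena_range_free(&a, e, 8);
        return 0;
    }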
@@ -15,8 +15,6 @@
 #include <linux/export.h>
 #include <linux/log2.h>
 #include <linux/of_device.h>
-#include <linux/hash.h>
-#include <linux/iommu-common.h>
 
 #include <asm/iommu.h>
 #include <asm/irq.h>
@@ -30,7 +28,6 @@
 
 #define DRIVER_NAME "pci_sun4v"
 #define PFX         DRIVER_NAME ": "
-static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
 
 static unsigned long vpci_major = 1;
 static unsigned long vpci_minor = 1;
@@ -158,13 +155,14 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 
        iommu = dev->archdata.iommu;
 
-       entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
-                                     __this_cpu_read(iommu_pool_hash));
+       spin_lock_irqsave(&iommu->lock, flags);
+       entry = iommu_range_alloc(dev, iommu, npages, NULL);
+       spin_unlock_irqrestore(&iommu->lock, flags);
 
        if (unlikely(entry == DMA_ERROR_CODE))
                goto range_alloc_fail;
 
-       *dma_addrp = (iommu->tbl.page_table_map_base +
+       *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        first_page = __pa(first_page);
@@ -190,46 +188,45 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
        return ret;
 
 iommu_map_fail:
-       iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, false, NULL);
+       /* Interrupts are disabled.  */
+       spin_lock(&iommu->lock);
+       iommu_range_free(iommu, *dma_addrp, npages);
+       spin_unlock_irqrestore(&iommu->lock, flags);
 
 range_alloc_fail:
        free_pages(first_page, order);
        return NULL;
 }
 
-static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
-                              unsigned long npages)
-{
-       u32 devhandle = *(u32 *)demap_arg;
-       unsigned long num, flags;
-
-       local_irq_save(flags);
-       do {
-               num = pci_sun4v_iommu_demap(devhandle,
-                                           HV_PCI_TSBID(0, entry),
-                                           npages);
-
-               entry += num;
-               npages -= num;
-       } while (npages != 0);
-       local_irq_restore(flags);
-}
-
 static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
                                 dma_addr_t dvma, struct dma_attrs *attrs)
 {
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
-       unsigned long order, npages, entry;
+       unsigned long flags, order, npages, entry;
        u32 devhandle;
 
        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;
-       entry = ((dvma - iommu->tbl.page_table_map_base) >> IO_PAGE_SHIFT);
-       dma_4v_iommu_demap(&devhandle, entry, npages);
-       iommu_tbl_range_free(&iommu->tbl, dvma, npages, false, NULL);
+       entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+
+       spin_lock_irqsave(&iommu->lock, flags);
+
+       iommu_range_free(iommu, dvma, npages);
+
+       do {
+               unsigned long num;
+
+               num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+                                           npages);
+               entry += num;
+               npages -= num;
+       } while (npages != 0);
+
+       spin_unlock_irqrestore(&iommu->lock, flags);
+
        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
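A note on the pattern above: pci_sun4v_iommu_demap() can tear down fewer pages than requested, so both the deleted dma_4v_iommu_demap() helper and the restored inline loops advance by the returned count and retry until nothing remains. A self-contained sketch of that partial-progress pattern; hv_demap() is an invented stand-in for the hypervisor call:

    /* Standalone model of the demap retry loop: the "hypervisor" may
     * process fewer pages than asked, so the caller advances by the
     * returned count until the range is fully unmapped. */
    #include <stdio.h>

    /* Pretend hypercall: unmaps at most 3 pages per invocation. */
    static unsigned long hv_demap(unsigned long entry, unsigned long npages)
    {
        unsigned long num = npages < 3 ? npages : 3;

        printf("demap entries %lu..%lu\n", entry, entry + num - 1);
        return num;
    }

    static void demap_range(unsigned long entry, unsigned long npages)
    {
        do {
            unsigned long num = hv_demap(entry, npages);

            entry += num;   /* advance past what was unmapped */
            npages -= num;  /* retry with the remainder */
        } while (npages != 0);
    }

    int main(void)
    {
        demap_range(128, 10); /* takes four hypercalls in this model */
        return 0;
    }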
@@ -256,13 +253,14 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
 
-       entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
-                                     __this_cpu_read(iommu_pool_hash));
+       spin_lock_irqsave(&iommu->lock, flags);
+       entry = iommu_range_alloc(dev, iommu, npages, NULL);
+       spin_unlock_irqrestore(&iommu->lock, flags);
 
        if (unlikely(entry == DMA_ERROR_CODE))
                goto bad;
 
-       bus_addr = (iommu->tbl.page_table_map_base +
+       bus_addr = (iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
@@ -292,7 +290,11 @@ bad:
        return DMA_ERROR_CODE;
 
 iommu_map_fail:
-       iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, false, NULL);
+       /* Interrupts are disabled.  */
+       spin_lock(&iommu->lock);
+       iommu_range_free(iommu, bus_addr, npages);
+       spin_unlock_irqrestore(&iommu->lock, flags);
+
        return DMA_ERROR_CODE;
 }
@@ -302,7 +304,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 {
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
-       unsigned long npages;
+       unsigned long flags, npages;
        long entry;
        u32 devhandle;
 
@@ -319,9 +321,22 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;
-       entry = (bus_addr - iommu->tbl.page_table_map_base) >> IO_PAGE_SHIFT;
-       dma_4v_iommu_demap(&devhandle, entry, npages);
-       iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, false, NULL);
+
+       spin_lock_irqsave(&iommu->lock, flags);
+
+       iommu_range_free(iommu, bus_addr, npages);
+
+       entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
+       do {
+               unsigned long num;
+
+               num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+                                           npages);
+               entry += num;
+               npages -= num;
+       } while (npages != 0);
+
+       spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -356,14 +371,14 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
        /* Init first segment length for backout at failure */
        outs->dma_length = 0;
 
-       local_irq_save(flags);
+       spin_lock_irqsave(&iommu->lock, flags);
 
        iommu_batch_start(dev, prot, ~0UL);
 
        max_seg_size = dma_get_max_seg_size(dev);
        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
-       base_shift = iommu->tbl.page_table_map_base >> IO_PAGE_SHIFT;
+       base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, out_entry = 0, slen;
 
@@ -376,8 +391,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
-               entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, &handle,
-                                             __this_cpu_read(iommu_pool_hash));
+               entry = iommu_range_alloc(dev, iommu, npages, &handle);
 
                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -390,7 +404,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                iommu_batch_new_entry(entry);
 
                /* Convert entry to a dma_addr_t */
-               dma_addr = iommu->tbl.page_table_map_base +
+               dma_addr = iommu->page_table_map_base +
                        (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
@@ -437,7 +451,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
        if (unlikely(err < 0L))
                goto iommu_map_failed;
 
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&iommu->lock, flags);
 
        if (outcount < incount) {
                outs = sg_next(outs);
@@ -455,8 +469,7 @@ iommu_map_failed:
                        vaddr = s->dma_address & IO_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IO_PAGE_SIZE);
-                       iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
-                                            false, NULL);
+                       iommu_range_free(iommu, vaddr, npages);
                        /* XXX demap? XXX */
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
@@ -464,7 +477,7 @@ iommu_map_failed:
                if (s == outs)
                        break;
        }
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&iommu->lock, flags);
 
        return 0;
 }
@@ -476,7 +489,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
        struct pci_pbm_info *pbm;
        struct scatterlist *sg;
        struct iommu *iommu;
-       unsigned long flags, entry;
+       unsigned long flags;
        u32 devhandle;
 
        BUG_ON(direction == DMA_NONE);
@@ -485,27 +498,33 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;
 
-       local_irq_save(flags);
+       spin_lock_irqsave(&iommu->lock, flags);
 
        sg = sglist;
        while (nelems--) {
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
-               unsigned long npages;
-               struct iommu_table *tbl = &iommu->tbl;
-               unsigned long shift = IO_PAGE_SHIFT;
+               unsigned long npages, entry;
 
                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
-               entry = ((dma_handle - tbl->page_table_map_base) >> shift);
-               dma_4v_iommu_demap(&devhandle, entry, npages);
-               iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
-                                    false, NULL);
+               iommu_range_free(iommu, dma_handle, npages);
+
+               entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+               while (npages) {
+                       unsigned long num;
+
+                       num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+                                                   npages);
+                       entry += num;
+                       npages -= num;
+               }
+
                sg = sg_next(sg);
        }
 
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static struct dma_map_ops sun4v_dma_ops = {
@@ -517,8 +536,6 @@ static struct dma_map_ops sun4v_dma_ops = {
        .unmap_sg                       = dma_4v_unmap_sg,
 };
 
-static struct iommu_tbl_ops dma_4v_iommu_ops;
-
 static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 {
        struct property *prop;
@@ -533,33 +550,30 @@ static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 }
 
 static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
-                                           struct iommu_table *iommu)
+                                           struct iommu *iommu)
 {
-       struct iommu_pool *pool;
-       unsigned long i, pool_nr, cnt = 0;
+       struct iommu_arena *arena = &iommu->arena;
+       unsigned long i, cnt = 0;
        u32 devhandle;
 
        devhandle = pbm->devhandle;
-       for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
-               pool = &(iommu->arena_pool[pool_nr]);
-               for (i = pool->start; i <= pool->end; i++) {
-                       unsigned long ret, io_attrs, ra;
+       for (i = 0; i < arena->limit; i++) {
+               unsigned long ret, io_attrs, ra;
 
-                       ret = pci_sun4v_iommu_getmap(devhandle,
-                                                    HV_PCI_TSBID(0, i),
-                                                    &io_attrs, &ra);
-                       if (ret == HV_EOK) {
-                               if (page_in_phys_avail(ra)) {
-                                       pci_sun4v_iommu_demap(devhandle,
-                                                             HV_PCI_TSBID(0,
-                                                                          i), 1);
-                               } else {
-                                       cnt++;
-                                       __set_bit(i, iommu->map);
-                               }
-                       }
+               ret = pci_sun4v_iommu_getmap(devhandle,
+                                            HV_PCI_TSBID(0, i),
+                                            &io_attrs, &ra);
+               if (ret == HV_EOK) {
+                       if (page_in_phys_avail(ra)) {
+                               pci_sun4v_iommu_demap(devhandle,
+                                                     HV_PCI_TSBID(0, i), 1);
+                       } else {
+                               cnt++;
+                               __set_bit(i, arena->map);
+                       }
                }
        }
 
        return cnt;
 }
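The probe loop goes back to a single flat scan over arena->limit entries, but its job is unchanged: keep TSB entries that firmware (OBP) already mapped, marking them allocated so the driver never hands them out, and demap stale entries whose target is ordinary available memory. A compact standalone model, with hv_getmap() and page_is_ram() as invented stand-ins for pci_sun4v_iommu_getmap() and page_in_phys_avail():

    /* Compact model of the restored probe logic: scan every TSB entry,
     * reserve firmware-established mappings, drop stale ones. */
    #include <stdbool.h>
    #include <stdio.h>

    #define LIMIT 16UL

    static unsigned char map[LIMIT];   /* models arena->map, byte per entry */

    /* Stub: entries 3 and 7 are mapped; entry 3 points at ordinary RAM. */
    static bool hv_getmap(unsigned long i, unsigned long *ra)
    {
        *ra = i;
        return i == 3 || i == 7;
    }

    static bool page_is_ram(unsigned long ra)  /* models page_in_phys_avail */
    {
        return ra == 3;
    }

    static unsigned long probe_existing(void)
    {
        unsigned long i, ra, cnt = 0;

        for (i = 0; i < LIMIT; i++) {
            if (!hv_getmap(i, &ra))
                continue;               /* entry not mapped */
            if (page_is_ram(ra)) {
                /* stale mapping over normal memory: would be demapped */
            } else {
                cnt++;
                map[i] = 1;             /* reserve: firmware still uses it */
            }
        }
        return cnt;
    }

    int main(void)
    {
        printf("imported %lu entries\n", probe_existing());
        return 0;
    }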
@@ -587,22 +601,22 @@ static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
        dma_offset = vdma[0];
 
        /* Setup initial software IOMMU state. */
+       spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
-       iommu->tbl.page_table_map_base = dma_offset;
+       iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;
 
        /* Allocate and initialize the free area map. */
        sz = (num_tsb_entries + 7) / 8;
        sz = (sz + 7UL) & ~7UL;
-       iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
-       if (!iommu->tbl.map) {
+       iommu->arena.map = kzalloc(sz, GFP_KERNEL);
+       if (!iommu->arena.map) {
                printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
                return -ENOMEM;
        }
-       iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
-                           &dma_4v_iommu_ops, false /* no large_pool */,
-                           0 /* default npools */);
-       sz = probe_existing_entries(pbm, &iommu->tbl);
+       iommu->arena.limit = num_tsb_entries;
+
+       sz = probe_existing_entries(pbm, iommu);
        if (sz)
                printk("%s: Imported %lu TSB entries from OBP\n",
                       pbm->name, sz);
@@ -1001,17 +1015,8 @@ static struct platform_driver pci_sun4v_driver = {
        .probe          = pci_sun4v_probe,
 };
 
-static void setup_iommu_pool_hash(void)
-{
-       unsigned int i;
-
-       for_each_possible_cpu(i)
-               per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
-}
-
 static int __init pci_sun4v_init(void)
 {
-       setup_iommu_pool_hash();
        return platform_driver_register(&pci_sun4v_driver);
 }
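The deleted setup_iommu_pool_hash() seeded a per-CPU value with hash_32(cpu, IOMMU_POOL_HASHBITS), which the removed allocation sites earlier in this diff fed to iommu_tbl_range_alloc() so that different CPUs tend to hit different pools rather than one shared lock. A standalone sketch of that idea; the multiplicative constant mirrors the kernel's hash_32() of that era, and the CPU/pool counts are example values, not taken from this file:

    /* Sketch of the deleted per-CPU pool-hash idea: each CPU gets a
     * precomputed hash that selects an allocation pool, spreading lock
     * contention across pools. */
    #include <stdint.h>
    #include <stdio.h>

    #define GOLDEN_RATIO_PRIME_32 0x9e370001UL
    #define IOMMU_POOL_HASHBITS 4      /* example: up to 16 pools */
    #define NR_CPUS 8                  /* example CPU count */

    static uint32_t hash_32(uint32_t val, unsigned int bits)
    {
        /* multiplicative hash, top bits of the product */
        return (uint32_t)(val * GOLDEN_RATIO_PRIME_32) >> (32 - bits);
    }

    int main(void)
    {
        unsigned int cpu;
        uint32_t iommu_pool_hash[NR_CPUS]; /* models the per-CPU variable */

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            iommu_pool_hash[cpu] = hash_32(cpu, IOMMU_POOL_HASHBITS);
            printf("cpu %u -> pool hash %u\n", cpu, iommu_pool_hash[cpu]);
        }
        return 0;
    }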