Merge branch 'topic/dma' into next

Merge hch's big DMA rework series. This is in a topic branch in case he
wants to merge it to minimise conflicts.
Michael Ellerman
2019-02-21 23:15:10 +11:00
commit d0055df0c9
48 changed files with 318 additions and 1159 deletions

View File

@@ -216,7 +216,7 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
 	 * their parent device so drivers shouldn't be doing DMA
 	 * operations directly on these devices.
 	 */
-	set_dma_ops(&npe->pdev->dev, NULL);
+	set_dma_ops(&npe->pdev->dev, &dma_dummy_ops);
 }
 
 /*
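The npu-dma.c hunk above swaps a NULL ops pointer (which on powerpc meant "pick a default later") for an explicit dummy table, so DMA attempted directly on an NPU device now fails deterministically. For orientation, a minimal sketch of what a fail-everything dma_map_ops table looks like; the function and table names here are illustrative, the real table is the generic dma_dummy_ops:

	/* Sketch: a dma_map_ops table whose operations always fail.
	 * Illustrative names; not the exact upstream dummy implementation. */
	#include <linux/dma-mapping.h>

	static dma_addr_t sketch_dummy_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs)
	{
		return DMA_MAPPING_ERROR;	/* reject every single-page mapping */
	}

	static int sketch_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir, unsigned long attrs)
	{
		return 0;			/* zero mapped entries signals failure */
	}

	static const struct dma_map_ops sketch_dummy_ops = {
		.map_page	= sketch_dummy_map_page,
		.map_sg		= sketch_dummy_map_sg,
	};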
View File

@@ -1746,7 +1746,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
 	pe = &phb->ioda.pe_array[pdn->pe_number];
 	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
-	set_dma_offset(&pdev->dev, pe->tce_bypass_base);
+	pdev->dev.archdata.dma_offset = pe->tce_bypass_base;
 	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
 	/*
 	 * Note: iommu_add_device() will fail here as
@@ -1756,31 +1756,6 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
 	 */
 }
 
-static bool pnv_pci_ioda_pe_single_vendor(struct pnv_ioda_pe *pe)
-{
-	unsigned short vendor = 0;
-	struct pci_dev *pdev;
-
-	if (pe->device_count == 1)
-		return true;
-
-	/* pe->pdev should be set if it's a single device, pe->pbus if not */
-	if (!pe->pbus)
-		return true;
-
-	list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
-		if (!vendor) {
-			vendor = pdev->vendor;
-			continue;
-		}
-
-		if (pdev->vendor != vendor)
-			return false;
-	}
-
-	return true;
-}
-
 /*
  * Reconfigure TVE#0 to be usable as 64-bit DMA space.
  *
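The removed helper is not simply dropped: its cheap checks are folded inline into the bypass path later in this diff as "(pe->device_count == 1 || !pe->pbus)". One behavioural nuance worth noting: the old helper also accepted a multi-device bus whose devices all shared one vendor ID, a case the inlined condition no longer covers. A sketch of the surviving predicate (illustrative helper name, not upstream code):

	/* Sketch of the condition that replaces pnv_pci_ioda_pe_single_vendor().
	 * pe->pdev is set for a single-device PE, pe->pbus otherwise; the
	 * per-vendor walk of pe->pbus->devices is gone. */
	static inline bool sketch_pe_is_single_device(struct pnv_ioda_pe *pe)
	{
		return pe->device_count == 1 || !pe->pbus;
	}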
@@ -1850,88 +1825,45 @@ err:
 	return -EIO;
 }
 
-static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
+		u64 dma_mask)
 {
 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 	struct pnv_phb *phb = hose->private_data;
 	struct pci_dn *pdn = pci_get_pdn(pdev);
 	struct pnv_ioda_pe *pe;
-	uint64_t top;
-	bool bypass = false;
-	s64 rc;
 
 	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
 		return -ENODEV;
 
 	pe = &phb->ioda.pe_array[pdn->pe_number];
 	if (pe->tce_bypass_enabled) {
-		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
-		bypass = (dma_mask >= top);
+		u64 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
+		if (dma_mask >= top)
+			return true;
 	}
 
-	if (bypass) {
-		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
-		set_dma_ops(&pdev->dev, &dma_nommu_ops);
-	} else {
-		/*
-		 * If the device can't set the TCE bypass bit but still wants
-		 * to access 4GB or more, on PHB3 we can reconfigure TVE#0 to
-		 * bypass the 32-bit region and be usable for 64-bit DMAs.
-		 * The device needs to be able to address all of this space.
-		 */
-		if (dma_mask >> 32 &&
-		    dma_mask > (memory_hotplug_max() + (1ULL << 32)) &&
-		    pnv_pci_ioda_pe_single_vendor(pe) &&
-		    phb->model == PNV_PHB_MODEL_PHB3) {
-			/* Configure the bypass mode */
-			rc = pnv_pci_ioda_dma_64bit_bypass(pe);
-			if (rc)
-				return rc;
-			/* 4GB offset bypasses 32-bit space */
-			set_dma_offset(&pdev->dev, (1ULL << 32));
-			set_dma_ops(&pdev->dev, &dma_nommu_ops);
-		} else if (dma_mask >> 32 && dma_mask != DMA_BIT_MASK(64)) {
-			/*
-			 * Fail the request if a DMA mask between 32 and 64 bits
-			 * was requested but couldn't be fulfilled. Ideally we
-			 * would do this for 64-bits but historically we have
-			 * always fallen back to 32-bits.
-			 */
-			return -ENOMEM;
-		} else {
-			dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
-			set_dma_ops(&pdev->dev, &dma_iommu_ops);
-		}
+	/*
+	 * If the device can't set the TCE bypass bit but still wants
+	 * to access 4GB or more, on PHB3 we can reconfigure TVE#0 to
+	 * bypass the 32-bit region and be usable for 64-bit DMAs.
+	 * The device needs to be able to address all of this space.
+	 */
+	if (dma_mask >> 32 &&
+	    dma_mask > (memory_hotplug_max() + (1ULL << 32)) &&
+	    /* pe->pdev should be set if it's a single device, pe->pbus if not */
+	    (pe->device_count == 1 || !pe->pbus) &&
+	    phb->model == PNV_PHB_MODEL_PHB3) {
+		/* Configure the bypass mode */
+		s64 rc = pnv_pci_ioda_dma_64bit_bypass(pe);
+		if (rc)
+			return rc;
+		/* 4GB offset bypasses 32-bit space */
+		pdev->dev.archdata.dma_offset = (1ULL << 32);
+		return true;
 	}
-	*pdev->dev.dma_mask = dma_mask;
-
-	/* Update peer npu devices */
-	pnv_npu_try_dma_set_bypass(pdev, bypass);
-
-	return 0;
-}
-
-static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
-{
-	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
-	struct pnv_phb *phb = hose->private_data;
-	struct pci_dn *pdn = pci_get_pdn(pdev);
-	struct pnv_ioda_pe *pe;
-	u64 end, mask;
-
-	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
-		return 0;
-
-	pe = &phb->ioda.pe_array[pdn->pe_number];
-	if (!pe->tce_bypass_enabled)
-		return __dma_get_required_mask(&pdev->dev);
-
-	end = pe->tce_bypass_base + memblock_end_of_DRAM();
-	mask = 1ULL << (fls64(end) - 1);
-	mask += mask - 1;
 
-	return mask;
+	return false;
 }
 
 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
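pnv_pci_ioda_iommu_bypass_supported() now only answers a question; installing ops and updating the mask moves to common code. Roughly, the generic powerpc IOMMU layer consults the new controller hook from its dma_supported() path and switches the device to direct mapping when bypass is possible. A sketch under that design, with hedged names (the real logic lives in arch/powerpc/kernel/dma-iommu.c, not reproduced here):

	/* Sketch: generic side consulting the controller's bypass hook. */
	#include <linux/pci.h>

	static bool sketch_iommu_bypass_supported(struct pci_dev *pdev, u64 mask)
	{
		struct pci_controller *hose = pci_bus_to_host(pdev->bus);

		return hose->controller_ops.iommu_bypass_supported &&
		       hose->controller_ops.iommu_bypass_supported(pdev, mask);
	}

	static int sketch_dma_iommu_dma_supported(struct device *dev, u64 mask)
	{
		if (dev_is_pci(dev) &&
		    sketch_iommu_bypass_supported(to_pci_dev(dev), mask)) {
			/* assumed per-device flag; dma-direct is used from here on */
			dev->archdata.iommu_bypass = true;
			return 1;
		}
		/* otherwise the mask must at least reach the 32-bit TCE window */
		return mask >= DMA_BIT_MASK(32);
	}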
@@ -1940,7 +1872,7 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
-		set_dma_offset(&dev->dev, pe->tce_bypass_base);
+		dev->dev.archdata.dma_offset = pe->tce_bypass_base;
 
 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
 			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
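set_dma_offset() was a thin powerpc wrapper around this same archdata field, so writing dev->dev.archdata.dma_offset directly is behaviour-preserving. The field's job, conceptually: the TCE bypass window starts at tce_bypass_base on the bus, so the direct-mapping path adds the offset when converting a CPU physical address to a bus address. A one-line sketch (the real translation sits in powerpc's phys_to_dma/dma-direct glue; the helper name is illustrative):

	/* Sketch: how archdata.dma_offset feeds bus-address translation. */
	static inline dma_addr_t sketch_phys_to_dma(struct device *dev, phys_addr_t pa)
	{
		return (dma_addr_t)pa + dev->archdata.dma_offset;
	}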
@@ -3659,6 +3591,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
 	.dma_dev_setup = pnv_pci_dma_dev_setup,
 	.dma_bus_setup = pnv_pci_dma_bus_setup,
+	.iommu_bypass_supported = pnv_pci_ioda_iommu_bypass_supported,
 	.setup_msi_irqs = pnv_setup_msi_irqs,
 	.teardown_msi_irqs = pnv_teardown_msi_irqs,
 	.enable_device_hook = pnv_pci_enable_device_hook,
@@ -3666,19 +3599,9 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
 	.window_alignment = pnv_pci_window_alignment,
 	.setup_bridge = pnv_pci_setup_bridge,
 	.reset_secondary_bus = pnv_pci_reset_secondary_bus,
-	.dma_set_mask = pnv_pci_ioda_dma_set_mask,
-	.dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
 	.shutdown = pnv_pci_ioda_shutdown,
 };
 
-static int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask)
-{
-	dev_err_once(&npdev->dev,
-			"%s operation unsupported for NVLink devices\n",
-			__func__);
-	return -EPERM;
-}
-
 static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
 	.dma_dev_setup = pnv_pci_dma_dev_setup,
 	.setup_msi_irqs = pnv_setup_msi_irqs,
@@ -3686,7 +3609,6 @@ static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
 	.enable_device_hook = pnv_pci_enable_device_hook,
 	.window_alignment = pnv_pci_window_alignment,
 	.reset_secondary_bus = pnv_pci_reset_secondary_bus,
-	.dma_set_mask = pnv_npu_dma_set_mask,
 	.shutdown = pnv_pci_ioda_shutdown,
 	.disable_device = pnv_npu_disable_device,
 };
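With .dma_set_mask and .dma_get_required_mask dropped from both controller_ops tables (and the NPU's -EPERM shim made redundant by dma_dummy_ops), the generic paths take over. The hand-rolled computation removed from pnv_pci_ioda_dma_get_required_mask() survives in spirit: generic dma-direct derives the required mask from the highest DMA-reachable address in the same way. A sketch of that arithmetic, assuming end > 0 (helper name illustrative):

	/* Sketch: smallest all-ones mask covering 'end', as in the removed code:
	 * mask = 1ULL << (fls64(end) - 1); mask += mask - 1; */
	#include <linux/bitops.h>

	static u64 sketch_required_mask(u64 end)
	{
		u64 high = 1ULL << (fls64(end) - 1);	/* top set bit of 'end' */

		return high + (high - 1);	/* end=0x2_4000_0000 -> 0x3_ffff_ffff */
	}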