[PATCH] PCI: altix: msi support
MSI callouts for altix. Involves a fair amount of code reorg in the SN irq.c code, as well as some extensions to the altix PCI provider abstraction.

Signed-off-by: Mark Maule <maule@sgi.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Committed by: Greg Kroah-Hartman
Parent: 10083072bf
Commit: 83821d3f55
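The core of the change is that every SN PCI provider DMA callout now takes a trailing dma_flags argument, letting callers say whether paddr is a physical or XIO address (SN_DMA_ADDRTYPE / SN_DMA_ADDR_PHYS) and whether the mapping will carry MSI writes (SN_DMA_MSI). Below is a minimal user-space sketch of that callout shape; the SN_DMA_* names come from the diff, but the numeric values, the struct name, and toy_dma_map() are illustrative assumptions rather than the kernel's definitions.

/*
 * Sketch only: the SN_DMA_* names are taken from the patch below, but the
 * numeric values, the struct layout, and toy_dma_map() are assumptions
 * made purely to illustrate the extended callout shape.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SN_DMA_ADDR_PHYS   0x1             /* assumed: paddr is a physical address */
#define SN_DMA_ADDR_XIO    0x2             /* assumed: paddr is already an XIO address */
#define SN_DMA_ADDRTYPE(f) ((f) & 0x3)     /* assumed encoding of the address type */
#define SN_DMA_MSI         0x4             /* assumed: mapping will carry MSI writes */

typedef uint64_t dma_addr_t;

/* Every provider DMA callout now takes dma_flags as a trailing argument. */
struct sn_pci_provider_sketch {
        dma_addr_t (*dma_map)(void *pdev, unsigned long paddr,
                              size_t size, int dma_flags);
        dma_addr_t (*dma_map_consistent)(void *pdev, unsigned long paddr,
                                         size_t size, int dma_flags);
};

static dma_addr_t toy_dma_map(void *pdev, unsigned long paddr,
                              size_t size, int dma_flags)
{
        (void)pdev; (void)size;
        /* A real provider builds an ATE or direct map here; the toy version
         * just tags MSI mappings so the flag routing is visible. */
        return (dma_flags & SN_DMA_MSI) ? ((dma_addr_t)paddr | (1ULL << 61))
                                        : (dma_addr_t)paddr;
}

int main(void)
{
        struct sn_pci_provider_sketch prov = { toy_dma_map, toy_dma_map };

        /* Ordinary streaming maps pass SN_DMA_ADDR_PHYS, as the reworked
         * sn_dma_map_single()/sn_dma_map_sg() do in the diff. */
        dma_addr_t plain = prov.dma_map(NULL, 0x1000, 4096, SN_DMA_ADDR_PHYS);

        /* An MSI mapping additionally ORs in SN_DMA_MSI. */
        dma_addr_t msi = prov.dma_map(NULL, 0x1000, 4096,
                                      SN_DMA_ADDR_PHYS | SN_DMA_MSI);

        printf("plain=%#llx msi=%#llx\n",
               (unsigned long long)plain, (unsigned long long)msi);
        return 0;
}

Each provider then interprets the flag itself: pcibr sets PCI32_ATE_MSI or the TIOCP MSI command type, tioce sets the MSI bit in its ATEs or d64 addresses, and paths that cannot express MSI (tioca, 32-bit direct maps, the M40/M40S ATE flavors) simply return 0.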
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -11,7 +11,7 @@
 #include <linux/module.h>
 #include <asm/dma.h>
-#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/pcidev.h>
 #include <asm/sn/sn_sal.h>
@@ -113,7 +113,8 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 	 * resources.
 	 */
 
-	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size);
+	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
+						   SN_DMA_ADDR_PHYS);
 	if (!*dma_handle) {
 		printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
 		free_pages((unsigned long)cpuaddr, get_order(size));
@@ -176,7 +177,7 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	BUG_ON(dev->bus != &pci_bus_type);
 
 	phys_addr = __pa(cpu_addr);
-	dma_addr = provider->dma_map(pdev, phys_addr, size);
+	dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
 	if (!dma_addr) {
 		printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
 		return 0;
@@ -260,7 +261,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	for (i = 0; i < nhwentries; i++, sg++) {
 		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
 		sg->dma_address = provider->dma_map(pdev,
-						    phys_addr, sg->length);
+						    phys_addr, sg->length,
+						    SN_DMA_ADDR_PHYS);
 
 		if (!sg->dma_address) {
 			printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -41,7 +41,7 @@ extern int sn_ioif_inited;
 
 static dma_addr_t
 pcibr_dmamap_ate32(struct pcidev_info *info,
-		   u64 paddr, size_t req_size, u64 flags)
+		   u64 paddr, size_t req_size, u64 flags, int dma_flags)
 {
 
 	struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
@@ -81,9 +81,12 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
 	if (IS_PCIX(pcibus_info))
 		ate_flags &= ~(PCI32_ATE_PREF);
 
-	xio_addr =
-	    IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
-	    PHYS_TO_TIODMA(paddr);
+	if (SN_DMA_ADDRTYPE(dma_flags == SN_DMA_ADDR_PHYS))
+		xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+		    PHYS_TO_TIODMA(paddr);
+	else
+		xio_addr = paddr;
+
 	offset = IOPGOFF(xio_addr);
 	ate = ate_flags | (xio_addr - offset);
 
@@ -91,6 +94,13 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
 	if (IS_PIC_SOFT(pcibus_info)) {
 		ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
 	}
+
+	/*
+	 * If we're mapping for MSI, set the MSI bit in the ATE
+	 */
+	if (dma_flags & SN_DMA_MSI)
+		ate |= PCI32_ATE_MSI;
+
 	ate_write(pcibus_info, ate_index, ate_count, ate);
 
 	/*
@@ -105,20 +115,27 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
 	if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
 		ATE_SWAP_ON(pci_addr);
 
+
 	return pci_addr;
 }
 
 static dma_addr_t
 pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
-			u64 dma_attributes)
+			u64 dma_attributes, int dma_flags)
 {
 	struct pcibus_info *pcibus_info = (struct pcibus_info *)
 	    ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
 	u64 pci_addr;
 
 	/* Translate to Crosstalk View of Physical Address */
-	pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
-		    PHYS_TO_TIODMA(paddr)) | dma_attributes;
+	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+		pci_addr = IS_PIC_SOFT(pcibus_info) ?
+		    PHYS_TO_DMA(paddr) :
+		    PHYS_TO_TIODMA(paddr) | dma_attributes;
+	else
+		pci_addr = IS_PIC_SOFT(pcibus_info) ?
+		    paddr :
+		    paddr | dma_attributes;
 
 	/* Handle Bus mode */
 	if (IS_PCIX(pcibus_info))
@@ -130,7 +147,9 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
 		    ((u64) pcibus_info->
 		     pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
 	} else
-		pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
+		pci_addr |= (dma_flags & SN_DMA_MSI) ?
+		    TIOCP_PCI64_CMDTYPE_MSI :
+		    TIOCP_PCI64_CMDTYPE_MEM;
 
 	/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
 	if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
@@ -141,7 +160,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
 
 static dma_addr_t
 pcibr_dmatrans_direct32(struct pcidev_info * info,
-			u64 paddr, size_t req_size, u64 flags)
+			u64 paddr, size_t req_size, u64 flags, int dma_flags)
 {
 	struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
 	struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
@@ -156,8 +175,14 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
 		return 0;
 	}
 
-	xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
-	    PHYS_TO_TIODMA(paddr);
+	if (dma_flags & SN_DMA_MSI)
+		return 0;
+
+	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+		xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+		    PHYS_TO_TIODMA(paddr);
+	else
+		xio_addr = paddr;
 
 	xio_base = pcibus_info->pbi_dir_xbase;
 	offset = xio_addr - xio_base;
@@ -327,7 +352,7 @@ void sn_dma_flush(u64 addr)
  */
 
 dma_addr_t
-pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
+pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
 {
 	dma_addr_t dma_handle;
 	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
@@ -344,11 +369,11 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
 		 */
 
 		dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
-						     PCI64_ATTR_PREF);
+						     PCI64_ATTR_PREF, dma_flags);
 	} else {
 		/* Handle 32-63 bit cards via direct mapping */
 		dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
-						     size, 0);
+						     size, 0, dma_flags);
 		if (!dma_handle) {
 			/*
 			 * It is a 32 bit card and we cannot do direct mapping,
@@ -356,7 +381,8 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
 			 */
 
 			dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
-							size, PCI32_ATE_PREF);
+							size, PCI32_ATE_PREF,
+							dma_flags);
 		}
 	}
 
@@ -365,18 +391,18 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
 
 dma_addr_t
 pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
-			 size_t size)
+			 size_t size, int dma_flags)
 {
 	dma_addr_t dma_handle;
 	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
 
 	if (hwdev->dev.coherent_dma_mask == ~0UL) {
 		dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
-						     PCI64_ATTR_BAR);
+						     PCI64_ATTR_BAR, dma_flags);
 	} else {
 		dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
 							     phys_addr, size,
-							     PCI32_ATE_BAR);
+							     PCI32_ATE_BAR, dma_flags);
 	}
 
 	return dma_handle;
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -515,10 +515,16 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
 * use the GART mapped mode.
 */
 static u64
-tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
 	u64 mapaddr;
 
+	/*
+	 * Not supported for now ...
+	 */
+	if (dma_flags & SN_DMA_MSI)
+		return 0;
+
 	/*
 	 * If card is 64 or 48 bit addresable, use a direct mapping.  32
 	 * bit direct is so restrictive w.r.t. where the memory resides that
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -170,7 +170,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
 	(ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1)
 
 #define ATE_VALID(ate)	((ate) & (1UL << 63))
-#define ATE_MAKE(addr, ps) (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63))
+#define ATE_MAKE(addr, ps, msi) \
+	(((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi)?(1UL << 62):0))
 
 /*
  * Flavors of ate-based mapping supported by tioce_alloc_map()
@@ -196,15 +197,17 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
  *
  * 63    - must be 1 to indicate d64 mode to CE hardware
  * 62    - barrier bit ... controlled with tioce_dma_barrier()
- * 61    - 0 since this is not an MSI transaction
+ * 61    - msi bit ... specified through dma_flags
  * 60:54 - reserved, MBZ
  */
 static u64
-tioce_dma_d64(unsigned long ct_addr)
+tioce_dma_d64(unsigned long ct_addr, int dma_flags)
 {
 	u64 bus_addr;
 
 	bus_addr = ct_addr | (1UL << 63);
+	if (dma_flags & SN_DMA_MSI)
+		bus_addr |= (1UL << 61);
 
 	return bus_addr;
 }
@@ -261,7 +264,7 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base,
  */
 static u64
 tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
-		u64 ct_addr, int len)
+		u64 ct_addr, int len, int dma_flags)
 {
 	int i;
 	int j;
@@ -270,6 +273,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 	int entries;
 	int nates;
 	u64 pagesize;
+	int msi_capable, msi_wanted;
 	u64 *ate_shadow;
 	u64 *ate_reg;
 	u64 addr;
@@ -291,6 +295,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 		ate_reg = ce_mmr->ce_ure_ate3240;
 		pagesize = ce_kern->ce_ate3240_pagesize;
 		bus_base = TIOCE_M32_MIN;
+		msi_capable = 1;
 		break;
 	case TIOCE_ATE_M40:
 		first = 0;
@@ -299,6 +304,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 		ate_reg = ce_mmr->ce_ure_ate40;
 		pagesize = MB(64);
 		bus_base = TIOCE_M40_MIN;
+		msi_capable = 0;
 		break;
 	case TIOCE_ATE_M40S:
 		/*
@@ -311,11 +317,16 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 		ate_reg = ce_mmr->ce_ure_ate3240;
 		pagesize = GB(16);
 		bus_base = TIOCE_M40S_MIN;
+		msi_capable = 0;
 		break;
 	default:
 		return 0;
 	}
 
+	msi_wanted = dma_flags & SN_DMA_MSI;
+	if (msi_wanted && !msi_capable)
+		return 0;
+
 	nates = ATE_NPAGES(ct_addr, len, pagesize);
 	if (nates > entries)
 		return 0;
@@ -344,7 +355,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 	for (j = 0; j < nates; j++) {
 		u64 ate;
 
-		ate = ATE_MAKE(addr, pagesize);
+		ate = ATE_MAKE(addr, pagesize, msi_wanted);
 		ate_shadow[i + j] = ate;
 		tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate);
 		addr += pagesize;
@@ -371,7 +382,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info.
 */
 static u64
-tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
+tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags)
 {
 	int dma_ok;
 	int port;
@@ -381,6 +392,9 @@ tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
 	u64 ct_lower;
 	dma_addr_t bus_addr;
 
+	if (dma_flags & SN_DMA_MSI)
+		return 0;
+
 	ct_upper = ct_addr & ~0x3fffffffUL;
 	ct_lower = ct_addr & 0x3fffffffUL;
 
@@ -507,7 +521,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
 */
 static u64
 tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
-		 int barrier)
+		 int barrier, int dma_flags)
 {
 	unsigned long flags;
 	u64 ct_addr;
@@ -523,15 +537,18 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
 	if (dma_mask < 0x7fffffffUL)
 		return 0;
 
-	ct_addr = PHYS_TO_TIODMA(paddr);
+	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+		ct_addr = PHYS_TO_TIODMA(paddr);
+	else
+		ct_addr = paddr;
 
 	/*
 	 * If the device can generate 64 bit addresses, create a D64 map.
-	 * Since this should never fail, bypass the rest of the checks.
 	 */
 	if (dma_mask == ~0UL) {
-		mapaddr = tioce_dma_d64(ct_addr);
-		goto dma_map_done;
+		mapaddr = tioce_dma_d64(ct_addr, dma_flags);
+		if (mapaddr)
+			goto dma_map_done;
 	}
 
 	pcidev_to_tioce(pdev, NULL, &ce_kern, &port);
@@ -574,18 +591,22 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
 
 		if (byte_count > MB(64)) {
 			mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
-						  port, ct_addr, byte_count);
+						  port, ct_addr, byte_count,
+						  dma_flags);
 			if (!mapaddr)
 				mapaddr =
 				    tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
-						    ct_addr, byte_count);
+						    ct_addr, byte_count,
+						    dma_flags);
 		} else {
 			mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
-						  ct_addr, byte_count);
+						  ct_addr, byte_count,
+						  dma_flags);
 			if (!mapaddr)
 				mapaddr =
 				    tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
-						    port, ct_addr, byte_count);
+						    port, ct_addr, byte_count,
+						    dma_flags);
 		}
 	}
 
@@ -593,7 +614,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
 	 * 32-bit direct is the next mode to try
 	 */
 	if (!mapaddr && dma_mask >= 0xffffffffUL)
-		mapaddr = tioce_dma_d32(pdev, ct_addr);
+		mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags);
 
 	/*
 	 * Last resort, try 32-bit ATE-based map.
@@ -601,7 +622,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
 	if (!mapaddr)
 		mapaddr =
 		    tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr,
-				    byte_count);
+				    byte_count, dma_flags);
 
 	spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
 
@@ -622,9 +643,9 @@ dma_map_done:
 * in the address.
 */
 static u64
-tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
-	return tioce_do_dma_map(pdev, paddr, byte_count, 0);
+	return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags);
 }
 
 /**
@@ -636,9 +657,9 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
 * Simply call tioce_do_dma_map() to create a map with the barrier bit set
 * in the address.
 */ static u64
-tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 {
-	return tioce_do_dma_map(pdev, paddr, byte_count, 1);
+	return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
 }
 
 /**
@@ -696,7 +717,7 @@ tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
 	while (ate_index <= last_ate) {
 		u64 ate;
 
-		ate = ATE_MAKE(0xdeadbeef, ps);
+		ate = ATE_MAKE(0xdeadbeef, ps, 0);
 		ce_kern->ce_ate3240_shadow[ate_index] = ate;
 		tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index],
 				 ate);
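The bit-level encoding for the CE 64-bit direct case is visible in the tioce_dma_d64() hunk above: bit 63 selects d64 mode and bit 61 marks the transaction as MSI. A tiny stand-alone sketch of that address construction follows; the SN_DMA_MSI value is again an assumed placeholder.

#include <stdint.h>
#include <stdio.h>

#define SN_DMA_MSI 0x4                     /* assumed value, for illustration */

/* Mirrors the logic of tioce_dma_d64() from the hunk above. */
static uint64_t d64_bus_addr(uint64_t ct_addr, int dma_flags)
{
        uint64_t bus_addr = ct_addr | (1ULL << 63);   /* d64 mode bit */

        if (dma_flags & SN_DMA_MSI)
                bus_addr |= (1ULL << 61);             /* MSI bit */

        return bus_addr;
}

int main(void)
{
        printf("plain: %#llx\n", (unsigned long long)d64_bus_addr(0x2000, 0));
        printf("msi:   %#llx\n",
               (unsigned long long)d64_bus_addr(0x2000, SN_DMA_MSI));
        return 0;
}

The 32-bit ATE path plays the same trick one bit higher: ATE_MAKE() now ORs in bit 62 when its msi argument is set, while bit 63 remains the valid bit.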