Merge branch 'pci/msi-affinity' into next
Conflicts: drivers/nvme/host/pci.c
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -4,6 +4,7 @@
  *
  * Copyright (C) 2003-2004 Intel
  * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Copyright (C) 2016 Christoph Hellwig.
  */
 
 #include <linux/err.h>
@@ -207,6 +208,12 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
         desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
 }
 
+static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
+{
+        return desc->mask_base +
+                desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+}
+
 /*
  * This internal function does not flush PCI writes to the device.
  * All users must ensure that they read from the device before either
@@ -217,8 +224,6 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
 {
         u32 mask_bits = desc->masked;
-        unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-                                PCI_MSIX_ENTRY_VECTOR_CTRL;
 
         if (pci_msi_ignore_mask)
                 return 0;
@@ -226,7 +231,7 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
         mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
         if (flag)
                 mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
-        writel(mask_bits, desc->mask_base + offset);
+        writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL);
 
         return mask_bits;
 }
@@ -284,8 +289,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
         BUG_ON(dev->current_state != PCI_D0);
 
         if (entry->msi_attrib.is_msix) {
-                void __iomem *base = entry->mask_base +
-                        entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+                void __iomem *base = pci_msix_desc_addr(entry);
 
                 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
                 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
@@ -315,9 +319,7 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
         if (dev->current_state != PCI_D0) {
                 /* Don't touch the hardware now */
         } else if (entry->msi_attrib.is_msix) {
-                void __iomem *base;
-                base = entry->mask_base +
-                        entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+                void __iomem *base = pci_msix_desc_addr(entry);
 
                 writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
                 writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
@@ -567,6 +569,7 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
         entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
         entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
         entry->nvec_used = nvec;
+        entry->affinity = dev->irq_affinity;
 
         if (control & PCI_MSI_FLAGS_64BIT)
                 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
@@ -678,10 +681,18 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
 static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
                               struct msix_entry *entries, int nvec)
 {
+        const struct cpumask *mask = NULL;
         struct msi_desc *entry;
-        int i;
+        int cpu = -1, i;
 
         for (i = 0; i < nvec; i++) {
+                if (dev->irq_affinity) {
+                        cpu = cpumask_next(cpu, dev->irq_affinity);
+                        if (cpu >= nr_cpu_ids)
+                                cpu = cpumask_first(dev->irq_affinity);
+                        mask = cpumask_of(cpu);
+                }
+
                 entry = alloc_msi_entry(&dev->dev);
                 if (!entry) {
                         if (!i)
@@ -694,10 +705,14 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 
                 entry->msi_attrib.is_msix = 1;
                 entry->msi_attrib.is_64 = 1;
-                entry->msi_attrib.entry_nr = entries[i].entry;
+                if (entries)
+                        entry->msi_attrib.entry_nr = entries[i].entry;
+                else
+                        entry->msi_attrib.entry_nr = i;
                 entry->msi_attrib.default_irq = dev->irq;
                 entry->mask_base = base;
                 entry->nvec_used = 1;
+                entry->affinity = mask;
 
                 list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
         }
@@ -712,13 +727,11 @@ static void msix_program_entries(struct pci_dev *dev,
         int i = 0;
 
         for_each_pci_msi_entry(entry, dev) {
-                int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
-                                PCI_MSIX_ENTRY_VECTOR_CTRL;
-
-                entries[i].vector = entry->irq;
-                entry->masked = readl(entry->mask_base + offset);
+                if (entries)
+                        entries[i++].vector = entry->irq;
+                entry->masked = readl(pci_msix_desc_addr(entry) +
+                                PCI_MSIX_ENTRY_VECTOR_CTRL);
                 msix_mask_irq(entry, 1);
-                i++;
         }
 }
 
@@ -931,7 +944,7 @@ EXPORT_SYMBOL(pci_msix_vec_count);
 /**
  * pci_enable_msix - configure device's MSI-X capability structure
  * @dev: pointer to the pci_dev data structure of MSI-X device function
- * @entries: pointer to an array of MSI-X entries
+ * @entries: pointer to an array of MSI-X entries (optional)
  * @nvec: number of MSI-X irqs requested for allocation by device driver
  *
  * Setup the MSI-X capability structure of device function with the number
@@ -951,22 +964,21 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
         if (!pci_msi_supported(dev, nvec))
                 return -EINVAL;
 
-        if (!entries)
-                return -EINVAL;
-
         nr_entries = pci_msix_vec_count(dev);
         if (nr_entries < 0)
                 return nr_entries;
         if (nvec > nr_entries)
                 return nr_entries;
 
-        /* Check for any invalid entries */
-        for (i = 0; i < nvec; i++) {
-                if (entries[i].entry >= nr_entries)
-                        return -EINVAL;         /* invalid entry */
-                for (j = i + 1; j < nvec; j++) {
-                        if (entries[i].entry == entries[j].entry)
-                                return -EINVAL; /* duplicate entry */
+        if (entries) {
+                /* Check for any invalid entries */
+                for (i = 0; i < nvec; i++) {
+                        if (entries[i].entry >= nr_entries)
+                                return -EINVAL;         /* invalid entry */
+                        for (j = i + 1; j < nvec; j++) {
+                                if (entries[i].entry == entries[j].entry)
+                                        return -EINVAL; /* duplicate entry */
+                        }
                 }
         }
         WARN_ON(!!dev->msix_enabled);
@@ -1026,19 +1038,8 @@ int pci_msi_enabled(void)
 }
 EXPORT_SYMBOL(pci_msi_enabled);
 
-/**
- * pci_enable_msi_range - configure device's MSI capability structure
- * @dev: device to configure
- * @minvec: minimal number of interrupts to configure
- * @maxvec: maximum number of interrupts to configure
- *
- * This function tries to allocate a maximum possible number of interrupts in a
- * range between @minvec and @maxvec. It returns a negative errno if an error
- * occurs. If it succeeds, it returns the actual number of interrupts allocated
- * and updates the @dev's irq member to the lowest new interrupt number;
- * the other interrupt numbers allocated to this device are consecutive.
- **/
-int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
+                unsigned int flags)
 {
         int nvec;
         int rc;
@@ -1061,26 +1062,86 @@ int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
         nvec = pci_msi_vec_count(dev);
         if (nvec < 0)
                 return nvec;
-        else if (nvec < minvec)
+        if (nvec < minvec)
                 return -EINVAL;
-        else if (nvec > maxvec)
+
+        if (nvec > maxvec)
                 nvec = maxvec;
 
-        do {
-                rc = msi_capability_init(dev, nvec);
-                if (rc < 0) {
-                        return rc;
-                } else if (rc > 0) {
-                        if (rc < minvec)
-                                return -ENOSPC;
-                        nvec = rc;
+        for (;;) {
+                if (!(flags & PCI_IRQ_NOAFFINITY)) {
+                        dev->irq_affinity = irq_create_affinity_mask(&nvec);
+                        if (nvec < minvec)
+                                return -ENOSPC;
                 }
-        } while (rc);
 
-        return nvec;
+                rc = msi_capability_init(dev, nvec);
+                if (rc == 0)
+                        return nvec;
+
+                kfree(dev->irq_affinity);
+                dev->irq_affinity = NULL;
+
+                if (rc < 0)
+                        return rc;
+                if (rc < minvec)
+                        return -ENOSPC;
+
+                nvec = rc;
+        }
+}
+
+/**
+ * pci_enable_msi_range - configure device's MSI capability structure
+ * @dev: device to configure
+ * @minvec: minimal number of interrupts to configure
+ * @maxvec: maximum number of interrupts to configure
+ *
+ * This function tries to allocate a maximum possible number of interrupts in a
+ * range between @minvec and @maxvec. It returns a negative errno if an error
+ * occurs. If it succeeds, it returns the actual number of interrupts allocated
+ * and updates the @dev's irq member to the lowest new interrupt number;
+ * the other interrupt numbers allocated to this device are consecutive.
+ **/
+int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+{
+        return __pci_enable_msi_range(dev, minvec, maxvec, PCI_IRQ_NOAFFINITY);
 }
 EXPORT_SYMBOL(pci_enable_msi_range);
 
+static int __pci_enable_msix_range(struct pci_dev *dev,
+                struct msix_entry *entries, int minvec, int maxvec,
+                unsigned int flags)
+{
+        int nvec = maxvec;
+        int rc;
+
+        if (maxvec < minvec)
+                return -ERANGE;
+
+        for (;;) {
+                if (!(flags & PCI_IRQ_NOAFFINITY)) {
+                        dev->irq_affinity = irq_create_affinity_mask(&nvec);
+                        if (nvec < minvec)
+                                return -ENOSPC;
+                }
+
+                rc = pci_enable_msix(dev, entries, nvec);
+                if (rc == 0)
+                        return nvec;
+
+                kfree(dev->irq_affinity);
+                dev->irq_affinity = NULL;
+
+                if (rc < 0)
+                        return rc;
+                if (rc < minvec)
+                        return -ENOSPC;
+
+                nvec = rc;
+        }
+}
+
 /**
  * pci_enable_msix_range - configure device's MSI-X capability structure
  * @dev: pointer to the pci_dev data structure of MSI-X device function
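For reference, a minimal caller of pci_enable_msi_range() as documented in the kernel-doc above. This sketch is not part of the diff; the function name and the 2..8 range are hypothetical:

#include <linux/pci.h>

/* Hypothetical probe fragment: request between 2 and 8 MSI vectors. */
static int example_setup_msi(struct pci_dev *pdev)
{
        int nvec;

        nvec = pci_enable_msi_range(pdev, 2, 8);
        if (nvec < 0)
                return nvec;    /* e.g. -EINVAL or -ENOSPC */

        /*
         * nvec vectors were allocated; per the kernel-doc, pdev->irq is the
         * lowest of them and the rest are consecutive.
         */
        return nvec;
}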
@@ -1097,29 +1158,102 @@ EXPORT_SYMBOL(pci_enable_msi_range);
  * with new allocated MSI-X interrupts.
  **/
 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
-                               int minvec, int maxvec)
+                int minvec, int maxvec)
 {
-        int nvec = maxvec;
-        int rc;
-
-        if (maxvec < minvec)
-                return -ERANGE;
-
-        do {
-                rc = pci_enable_msix(dev, entries, nvec);
-                if (rc < 0) {
-                        return rc;
-                } else if (rc > 0) {
-                        if (rc < minvec)
-                                return -ENOSPC;
-                        nvec = rc;
-                }
-        } while (rc);
-
-        return nvec;
+        return __pci_enable_msix_range(dev, entries, minvec, maxvec,
+                        PCI_IRQ_NOAFFINITY);
 }
 EXPORT_SYMBOL(pci_enable_msix_range);
 
+/**
+ * pci_alloc_irq_vectors - allocate multiple IRQs for a device
+ * @dev: PCI device to operate on
+ * @min_vecs: minimum number of vectors required (must be >= 1)
+ * @max_vecs: maximum (desired) number of vectors
+ * @flags: flags or quirks for the allocation
+ *
+ * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
+ * vectors if available, and fall back to a single legacy vector
+ * if neither is available.  Return the number of vectors allocated,
+ * (which might be smaller than @max_vecs) if successful, or a negative
+ * error code on error. If less than @min_vecs interrupt vectors are
+ * available for @dev the function will fail with -ENOSPC.
+ *
+ * To get the Linux IRQ number used for a vector that can be passed to
+ * request_irq() use the pci_irq_vector() helper.
+ */
+int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+                unsigned int max_vecs, unsigned int flags)
+{
+        int vecs = -ENOSPC;
+
+        if (!(flags & PCI_IRQ_NOMSIX)) {
+                vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
+                                flags);
+                if (vecs > 0)
+                        return vecs;
+        }
+
+        if (!(flags & PCI_IRQ_NOMSI)) {
+                vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
+                if (vecs > 0)
+                        return vecs;
+        }
+
+        /* use legacy irq if allowed */
+        if (!(flags & PCI_IRQ_NOLEGACY) && min_vecs == 1)
+                return 1;
+        return vecs;
+}
+EXPORT_SYMBOL(pci_alloc_irq_vectors);
+
+/**
+ * pci_free_irq_vectors - free previously allocated IRQs for a device
+ * @dev: PCI device to operate on
+ *
+ * Undoes the allocations and enabling in pci_alloc_irq_vectors().
+ */
+void pci_free_irq_vectors(struct pci_dev *dev)
+{
+        pci_disable_msix(dev);
+        pci_disable_msi(dev);
+}
+EXPORT_SYMBOL(pci_free_irq_vectors);
+
+/**
+ * pci_irq_vector - return Linux IRQ number of a device vector
+ * @dev: PCI device to operate on
+ * @nr: device-relative interrupt vector index (0-based).
+ */
+int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+        if (dev->msix_enabled) {
+                struct msi_desc *entry;
+                int i = 0;
+
+                for_each_pci_msi_entry(entry, dev) {
+                        if (i == nr)
+                                return entry->irq;
+                        i++;
+                }
+                WARN_ON_ONCE(1);
+                return -EINVAL;
+        }
+
+        if (dev->msi_enabled) {
+                struct msi_desc *entry = first_pci_msi_entry(dev);
+
+                if (WARN_ON_ONCE(nr >= entry->nvec_used))
+                        return -EINVAL;
+        } else {
+                if (WARN_ON_ONCE(nr > 0))
+                        return -EINVAL;
+        }
+
+        return dev->irq + nr;
+}
+EXPORT_SYMBOL(pci_irq_vector);
+
 struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
 {
         return to_pci_dev(desc->dev);
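A minimal probe-path sketch of the new allocator (hypothetical names, not part of this diff). With flags of 0, the code above tries MSI-X first, then MSI, then a single legacy IRQ, and also builds an affinity mask for the vectors because PCI_IRQ_NOAFFINITY is not set:

#include <linux/pci.h>

/* Hypothetical probe fragment using pci_alloc_irq_vectors(). */
static int example_alloc_vectors(struct pci_dev *pdev)
{
        int nvecs;

        nvecs = pci_alloc_irq_vectors(pdev, 1, 32, 0);
        if (nvecs < 0)
                return nvecs;   /* fewer than 1 vector was available */

        /* Somewhere between 1 and 32 vectors are now set up. */
        return nvecs;
}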
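The matching request and teardown sketch (again hypothetical names, not part of this diff): pci_irq_vector() turns a 0-based vector index into the Linux IRQ number that request_irq() expects, and pci_free_irq_vectors() undoes the allocation above:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t example_irq_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

/* Hypothetical: request one handler per vector returned by the sketch above. */
static int example_request_irqs(struct pci_dev *pdev, int nvecs, void *priv)
{
        int i, ret;

        for (i = 0; i < nvecs; i++) {
                ret = request_irq(pci_irq_vector(pdev, i),
                                  example_irq_handler, 0, "example", priv);
                if (ret)
                        goto err;
        }
        return 0;

err:
        while (--i >= 0)
                free_irq(pci_irq_vector(pdev, i), priv);
        pci_free_irq_vectors(pdev);
        return ret;
}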
--- a/drivers/pci/vc.c
+++ b/drivers/pci/vc.c
@@ -221,9 +221,9 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
                 else
                         pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
                                               *(u16 *)buf);
-                buf += 2;
+                buf += 4;
         }
-        len += 2;
+        len += 4;
 
         /*
          * If we have any Low Priority VCs and a VC Arbitration Table Offset