cxl: Support the cxl kernel API from a guest

Like on bare metal, the cxl driver creates a virtual PHB and a PCI
device for the AFU. The configuration space of that PCI device is
mapped to the AFU's configuration record.

Reuse the code defined in afu_cr_read8|16|32() when reading the
configuration space of the AFU device.
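Conceptually, the vPHB's config-space read accessor just dispatches on the
access size and forwards to those helpers. A minimal sketch of that idea
(the pci_bus_to_afu() lookup and the exact afu_cr_read* call forms are
illustrative, not the driver's actual plumbing):

#include <linux/pci.h>
#include "cxl.h"	/* driver-internal header, for struct cxl_afu */

/*
 * Illustrative sketch only: route a config-space read of the virtual
 * AFU device to the AFU configuration record accessors. The real
 * driver also handles the write path and error reporting.
 */
static int cxl_afu_cfg_read(struct pci_bus *bus, unsigned int devfn,
			    int offset, int size, u32 *val)
{
	struct cxl_afu *afu = pci_bus_to_afu(bus);	/* hypothetical lookup */
	int cr = PCI_FUNC(devfn);			/* configuration record index */
	u8 v8;
	u16 v16;
	u32 v32;
	int rc;

	switch (size) {
	case 1:
		rc = afu_cr_read8(afu, cr, offset, &v8);
		*val = v8;
		break;
	case 2:
		rc = afu_cr_read16(afu, cr, offset, &v16);
		*val = v16;
		break;
	case 4:
		rc = afu_cr_read32(afu, cr, offset, &v32);
		*val = v32;
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return rc ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}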

Even though the (virtual) AFU device is a PCI device, the adapter is
not. So a driver using the cxl kernel API cannot read the adapter VPD
through the usual PCI interfaces. Therefore, add a new call to the cxl
kernel API:
ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count);
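An AFU driver built on the cxl kernel API can then fetch the adapter VPD
along these lines (a minimal sketch; the function name and buffer size are
arbitrary):

#include <linux/pci.h>
#include <misc/cxl.h>

/* Illustrative only: read the adapter VPD through the cxl kernel API,
 * since the adapter itself is not exposed as a PCI device.
 */
static int example_read_adapter_vpd(struct pci_dev *pdev)
{
	u8 buf[256];	/* buffer size chosen arbitrarily for the example */
	ssize_t len;

	len = cxl_read_adapter_vpd(pdev, buf, sizeof(buf));
	if (len < 0) {
		dev_err(&pdev->dev, "reading adapter VPD failed: %zd\n", len);
		return len;
	}

	dev_info(&pdev->dev, "got %zd bytes of adapter VPD\n", len);
	return 0;
}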

Co-authored-by: Christophe Lombard <clombard@linux.vnet.ibm.com>
Signed-off-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Signed-off-by: Christophe Lombard <clombard@linux.vnet.ibm.com>
Reviewed-by: Manoj Kumar <manoj@linux.vnet.ibm.com>
Acked-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Frederic Barrat, 2016-03-04 12:26:40 +01:00
committed by Michael Ellerman
parent b40844aa55, commit d601ea918b
7 changed files with 202 additions and 109 deletions

@@ -89,28 +89,11 @@ int cxl_release_context(struct cxl_context *ctx)
}
EXPORT_SYMBOL_GPL(cxl_release_context);

int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
	if (num == 0)
		num = ctx->afu->pp_irqs;
	return afu_allocate_irqs(ctx, num);
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);

void cxl_free_afu_irqs(struct cxl_context *ctx)
{
	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);

static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
	__u16 range;
	int r;

	WARN_ON(num == 0);

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		range = ctx->irqs.range[r];
		if (num < range) {
@@ -121,6 +104,44 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
	return 0;
}

int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
	int res;
	irq_hw_number_t hwirq;

	if (num == 0)
		num = ctx->afu->pp_irqs;
	res = afu_allocate_irqs(ctx, num);
	if (!res && !cpu_has_feature(CPU_FTR_HVMODE)) {
		/* In a guest, the PSL interrupt is not multiplexed. It was
		 * allocated above, and we need to set its handler
		 */
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq)
			cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
	}
	return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);

void cxl_free_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, ctx);
		}
	}
	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);

int cxl_map_afu_irq(struct cxl_context *ctx, int num,
		    irq_handler_t handler, void *cookie, char *name)
{
@@ -356,3 +377,11 @@ void cxl_perst_reloads_same_image(struct cxl_afu *afu,
	afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);

ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
	struct cxl_afu *afu = cxl_pci_to_afu(dev);

	return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
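
For reference, a guest-side user of the kernel API exercising the interrupt
calls above might look roughly like this (a sketch only; the handler, the
error handling and how the context is obtained are illustrative, and
cxl_map_afu_irq() is assumed to return the mapped virq on success, as its
in-kernel callers expect):

#include <linux/interrupt.h>
#include <misc/cxl.h>

/* Illustrative AFU interrupt handler */
static irqreturn_t example_afu_irq(int irq, void *cookie)
{
	/* struct cxl_context *ctx = cookie; AFU-specific handling here */
	return IRQ_HANDLED;
}

/* Illustrative setup; ctx obtained earlier, e.g. from cxl_dev_context_init() */
static int example_setup_afu_irqs(struct cxl_context *ctx)
{
	int rc;

	/* 0 means "use the AFU's default number of per-process interrupts";
	 * in a guest this also allocates and wires up the PSL interrupt */
	rc = cxl_allocate_afu_irqs(ctx, 0);
	if (rc)
		return rc;

	/* Map AFU interrupt 1 to our handler (interrupt 0 is the PSL one) */
	rc = cxl_map_afu_irq(ctx, 1, example_afu_irq, ctx, "example");
	if (rc <= 0) {
		cxl_free_afu_irqs(ctx);
		return rc ? rc : -ENOMEM;
	}

	return 0;
}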