powerpc: Convert to using %pOF instead of full_name
Now that we have a custom printf format specifier, convert users of full_name to use %pOF instead. This is preparation to remove storing of the full path string for each node.

Signed-off-by: Rob Herring <robh@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Anatolij Gustschin <agust@denx.de>
Cc: Scott Wood <oss@buserror.net>
Cc: Kumar Gala <galak@kernel.crashing.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: linuxppc-dev@lists.ozlabs.org
Reviewed-by: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit b7c670d673
parent bcf21e3a97
committed by Michael Ellerman
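
For readers unfamiliar with the %pOF specifier, the sketch below shows the conversion pattern applied throughout the diff. It is a minimal, hypothetical illustration (my_probe and np are assumed names, not taken from this commit): instead of dereferencing the full_name string stored in the node, the device_node pointer itself is handed to printk and the printf core formats the node's full path.

#include <linux/of.h>
#include <linux/printk.h>

/* Hypothetical example, not part of this commit. */
static int my_probe(struct device_node *np)
{
	/* Old style: relies on the full path string stored in each node. */
	pr_info("setting up %s\n", np->full_name);

	/* New style: %pOF formats the node's full path from the node itself. */
	pr_info("setting up %pOF\n", np);

	return 0;
}

Once all callers print through %pOF, the kernel no longer needs to keep a composed full-path string in every struct device_node, which is the follow-on cleanup this commit prepares for.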
@@ -187,8 +187,8 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
 
 	irq_domain = irq_find_host(dn);
 	if (!irq_domain) {
-		dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n",
-			dn->full_name);
+		dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n",
+			dn);
 		goto out_error;
 	}
 
@@ -326,8 +326,8 @@ static void axon_msi_shutdown(struct platform_device *device)
 	struct axon_msic *msic = dev_get_drvdata(&device->dev);
 	u32 tmp;
 
-	pr_devel("axon_msi: disabling %s\n",
-		 irq_domain_get_of_node(msic->irq_domain)->full_name);
+	pr_devel("axon_msi: disabling %pOF\n",
+		 irq_domain_get_of_node(msic->irq_domain));
 	tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
 	tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
 	msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
@@ -340,12 +340,12 @@ static int axon_msi_probe(struct platform_device *device)
 	unsigned int virq;
 	int dcr_base, dcr_len;
 
-	pr_devel("axon_msi: setting up dn %s\n", dn->full_name);
+	pr_devel("axon_msi: setting up dn %pOF\n", dn);
 
 	msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
 	if (!msic) {
-		printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n",
-		       dn->full_name);
+		printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n",
+		       dn);
 		goto out;
 	}
 
@@ -354,30 +354,30 @@ static int axon_msi_probe(struct platform_device *device)
 
 	if (dcr_base == 0 || dcr_len == 0) {
 		printk(KERN_ERR
-		       "axon_msi: couldn't parse dcr properties on %s\n",
-		       dn->full_name);
+		       "axon_msi: couldn't parse dcr properties on %pOF\n",
+		       dn);
 		goto out_free_msic;
 	}
 
 	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
 	if (!DCR_MAP_OK(msic->dcr_host)) {
-		printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",
-		       dn->full_name);
+		printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n",
+		       dn);
 		goto out_free_msic;
 	}
 
 	msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
 					     &msic->fifo_phys, GFP_KERNEL);
 	if (!msic->fifo_virt) {
-		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
-		       dn->full_name);
+		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n",
+		       dn);
 		goto out_free_msic;
 	}
 
 	virq = irq_of_parse_and_map(dn, 0);
 	if (!virq) {
-		printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
-		       dn->full_name);
+		printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n",
+		       dn);
 		goto out_free_fifo;
 	}
 	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
@@ -385,8 +385,8 @@ static int axon_msi_probe(struct platform_device *device)
 	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
 	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
 	if (!msic->irq_domain) {
-		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
-		       dn->full_name);
+		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n",
+		       dn);
 		goto out_free_fifo;
 	}
 
@@ -412,7 +412,7 @@ static int axon_msi_probe(struct platform_device *device)
 
 	axon_msi_debug_setup(dn, msic);
 
-	printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);
+	printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn);
 
 	return 0;
 
@@ -303,8 +303,8 @@ static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
 	iic->node = of_node_get(node);
 	out_be64(&iic->regs->prio, 0);
 
-	printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n",
-	       hw_cpu, iic->target_id, node->full_name);
+	printk(KERN_INFO "IIC for CPU %d target id 0x%x : %pOF\n",
+	       hw_cpu, iic->target_id, node);
 }
 
 static int __init setup_iic(void)
@@ -278,8 +278,8 @@ static int cell_iommu_find_ioc(int nid, unsigned long *base)
 		if (of_node_to_nid(np) != nid)
 			continue;
 		if (of_address_to_resource(np, 0, &r)) {
-			printk(KERN_ERR "iommu: can't get address for %s\n",
-			       np->full_name);
+			printk(KERN_ERR "iommu: can't get address for %pOF\n",
+			       np);
 			continue;
 		}
 		*base = r.start;
@@ -458,8 +458,8 @@ static inline u32 cell_iommu_get_ioid(struct device_node *np)
 
 	ioid = of_get_property(np, "ioid", NULL);
 	if (ioid == NULL) {
-		printk(KERN_WARNING "iommu: missing ioid for %s using 0\n",
-		       np->full_name);
+		printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n",
+		       np);
 		return 0;
 	}
 
@@ -559,8 +559,8 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
 	 */
 	iommu = cell_iommu_for_node(dev_to_node(dev));
 	if (iommu == NULL || list_empty(&iommu->windows)) {
-		dev_err(dev, "iommu: missing iommu for %s (node %d)\n",
-			of_node_full_name(dev->of_node), dev_to_node(dev));
+		dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n",
+			dev->of_node, dev_to_node(dev));
 		return NULL;
 	}
 	window = list_entry(iommu->windows.next, struct iommu_window, list);
@@ -720,12 +720,12 @@ static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
 	/* Get node ID */
 	nid = of_node_to_nid(np);
 	if (nid < 0) {
-		printk(KERN_ERR "iommu: failed to get node for %s\n",
-		       np->full_name);
+		printk(KERN_ERR "iommu: failed to get node for %pOF\n",
+		       np);
 		return NULL;
 	}
-	pr_debug("iommu: setting up iommu for node %d (%s)\n",
-		 nid, np->full_name);
+	pr_debug("iommu: setting up iommu for node %d (%pOF)\n",
+		 nid, np);
 
 	/* XXX todo: If we can have multiple windows on the same IOMMU, which
 	 * isn't the case today, we probably want here to check whether the
@@ -736,8 +736,8 @@ static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
 	 */
 
 	if (cbe_nr_iommus >= NR_IOMMUS) {
-		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
-		       np->full_name);
+		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n",
+		       np);
 		return NULL;
 	}
 
@@ -196,8 +196,8 @@ static int __init cbe_ptcal_enable(void)
 	for_each_node_by_type(np, "cpu") {
 		const u32 *nid = of_get_property(np, "node-id", NULL);
 		if (!nid) {
-			printk(KERN_ERR "%s: node %s is missing node-id?\n",
-			       __func__, np->full_name);
+			printk(KERN_ERR "%s: node %pOF is missing node-id?\n",
+			       __func__, np);
 			continue;
 		}
 		cbe_ptcal_enable_on_node(*nid, order);
@@ -130,8 +130,8 @@ int __init spiderpci_iowa_init(struct iowa_bus *bus, void *data)
 	struct resource r;
 	unsigned long offset = (unsigned long)data;
 
-	pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%s)\n",
-		 np->full_name);
+	pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%pOF)\n",
+		 np);
 
 	priv = kzalloc(sizeof(struct spiderpci_iowa_private), GFP_KERNEL);
 	if (!priv) {
@@ -323,8 +323,8 @@ static void __init spider_init_one(struct device_node *of_node, int chip,
 	irq_set_handler_data(virq, pic);
 	irq_set_chained_handler(virq, spider_irq_cascade);
 
-	printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n",
-	       pic->node_id, addr, of_node->full_name);
+	printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %pOF\n",
+	       pic->node_id, addr, of_node);
 
 	/* Enable the interrupt detection enable bit. Do this last! */
 	out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1);
@@ -191,8 +191,8 @@ static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
 			goto err;
 		}
 		ret = -EINVAL;
-		pr_debug(" irq %d no 0x%x on %s\n", i, oirq.args[0],
-			 oirq.np->full_name);
+		pr_debug(" irq %d no 0x%x on %pOF\n", i, oirq.args[0],
+			 oirq.np);
 		spu->irqs[i] = irq_create_of_mapping(&oirq);
 		if (!spu->irqs[i]) {
 			pr_debug("spu_new: failed to map it !\n");
@@ -243,32 +243,32 @@ static int __init spu_map_device(struct spu *spu)
 	ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store,
 			       &spu->local_store_phys);
 	if (ret) {
-		pr_debug("spu_new: failed to map %s resource 0\n",
-			 np->full_name);
+		pr_debug("spu_new: failed to map %pOF resource 0\n",
+			 np);
 		goto out;
 	}
 	ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem,
 			       &spu->problem_phys);
 	if (ret) {
-		pr_debug("spu_new: failed to map %s resource 1\n",
-			 np->full_name);
+		pr_debug("spu_new: failed to map %pOF resource 1\n",
+			 np);
 		goto out_unmap;
 	}
 	ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL);
 	if (ret) {
-		pr_debug("spu_new: failed to map %s resource 2\n",
-			 np->full_name);
+		pr_debug("spu_new: failed to map %pOF resource 2\n",
+			 np);
 		goto out_unmap;
 	}
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
 		ret = spu_map_resource(spu, 3,
 				       (void __iomem**)&spu->priv1, NULL);
 	if (ret) {
-		pr_debug("spu_new: failed to map %s resource 3\n",
-			 np->full_name);
+		pr_debug("spu_new: failed to map %pOF resource 3\n",
+			 np);
 		goto out_unmap;
 	}
-	pr_debug("spu_new: %s maps:\n", np->full_name);
+	pr_debug("spu_new: %pOF maps:\n", np);
 	pr_debug(" local store : 0x%016lx -> 0x%p\n",
 		 spu->local_store_phys, spu->local_store);
 	pr_debug(" problem state : 0x%016lx -> 0x%p\n",
@@ -316,8 +316,8 @@ static int __init of_create_spu(struct spu *spu, void *data)
 
 	spu->node = of_node_to_nid(spe);
 	if (spu->node >= MAX_NUMNODES) {
-		printk(KERN_WARNING "SPE %s on node %d ignored,"
-		       " node number too big\n", spe->full_name, spu->node);
+		printk(KERN_WARNING "SPE %pOF on node %d ignored,"
+		       " node number too big\n", spe, spu->node);
 		printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
 		ret = -ENODEV;
 		goto out;