Merge tag 'powerpc-4.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates from Michael Ellerman:
 "Highlights include:

   - Support for the kexec_file_load() syscall, which is a prereq for
     secure and trusted boot.

   - Prevent kernel execution of userspace on P9 Radix (similar to
     SMEP/PXN).

   - Sort the exception tables at build time, to save time at boot, and
     store them as relative offsets to save space in the kernel image &
     memory.

   - Allow building the kernel with thin archives, which should allow us
     to build an allyesconfig once some other fixes land.

   - Build fixes to allow us to correctly rebuild when changing the
     kernel endian from big to little or vice versa.

   - Plumbing so that we can avoid doing a full mm TLB flush on P9
     Radix.

   - Initial stack protector support (-fstack-protector).

   - Support for dumping the radix (aka. Linux) and hash page tables via
     debugfs.

   - Fix an oops in cxl coredump generation when cxl_get_fd() is used.

   - Freescale updates from Scott: "Highlights include 8xx hugepage
     support, qbman fixes/cleanup, device tree updates, and some misc
     cleanup."

   - Many and varied fixes and minor enhancements as always.

  Thanks to:
    Alexey Kardashevskiy, Andrew Donnellan, Aneesh Kumar K.V, Anshuman
    Khandual, Anton Blanchard, Balbir Singh, Bartlomiej Zolnierkiewicz,
    Christophe Jaillet, Christophe Leroy, Denis Kirjanov, Elimar
    Riesebieter, Frederic Barrat, Gautham R. Shenoy, Geliang Tang, Geoff
    Levand, Jack Miller, Johan Hovold, Lars-Peter Clausen, Libin,
    Madhavan Srinivasan, Michael Neuling, Nathan Fontenot, Naveen N.
    Rao, Nicholas Piggin, Pan Xinhui, Peter Senna Tschudin, Rashmica
    Gupta, Rui Teng, Russell Currey, Scott Wood, Simon Guo, Suraj
    Jitindar Singh, Thiago Jung Bauermann, Tobias Klauser, Vaibhav Jain"

[ And thanks to Michael, who took time off from a new baby to get this
  pull request done.   - Linus ]

* tag 'powerpc-4.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (174 commits)
  powerpc/fsl/dts: add FMan node for t1042d4rdb
  powerpc/fsl/dts: add sg_2500_aqr105_phy4 alias on t1024rdb
  powerpc/fsl/dts: add QMan and BMan nodes on t1024
  powerpc/fsl/dts: add QMan and BMan nodes on t1023
  soc/fsl/qman: test: use DEFINE_SPINLOCK()
  powerpc/fsl-lbc: use DEFINE_SPINLOCK()
  powerpc/8xx: Implement support of hugepages
  powerpc: get hugetlbpage handling more generic
  powerpc: port 64 bits pgtable_cache to 32 bits
  powerpc/boot: Request no dynamic linker for boot wrapper
  soc/fsl/bman: Use resource_size instead of computation
  soc/fsl/qe: use builtin_platform_driver
  powerpc/fsl_pmc: use builtin_platform_driver
  powerpc/83xx/suspend: use builtin_platform_driver
  powerpc/ftrace: Fix the comments for ftrace_modify_code
  powerpc/perf: macros for power9 format encoding
  powerpc/perf: power9 raw event format encoding
  powerpc/perf: update attribute_group data structure
  powerpc/perf: factor out the event format field
  powerpc/mm/iommu, vfio/spapr: Put pages on VFIO container shutdown
  ...
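
For context on the kexec_file_load() highlight: unlike kexec_load(), the new syscall takes file descriptors for the kernel image (and optionally an initramfs), letting the kernel verify signatures before anything is loaded. A minimal userspace sketch, not part of this pull; the path handling, the raw syscall() invocation and the empty command line are illustrative assumptions:

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kexec.h>

/* Stage a kernel for a later kexec reboot: no initramfs, empty command
 * line (cmdline_len counts the terminating NUL, hence 1). */
static int load_kernel(const char *path)
{
	int kernel_fd = open(path, O_RDONLY);

	if (kernel_fd < 0)
		return -1;
	return syscall(__NR_kexec_file_load, kernel_fd, -1, 1UL, "",
		       KEXEC_FILE_NO_INITRAMFS);
}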
Linus Torvalds
2016-12-16 09:26:42 -08:00
parent 57ca04ab44 c6f6634721
commit de399813b5
259 changed files with 8706 additions and 2642 deletions


@@ -103,18 +103,18 @@ config 405GP
bool
select IBM405_ERR77
select IBM405_ERR51
select IBM_EMAC_ZMII
select IBM_EMAC_ZMII if IBM_EMAC
config 405EX
bool
select IBM_EMAC_EMAC4
select IBM_EMAC_RGMII
select IBM_EMAC_EMAC4 if IBM_EMAC
select IBM_EMAC_RGMII if IBM_EMAC
config 405EZ
bool
select IBM_EMAC_NO_FLOW_CTRL
select IBM_EMAC_MAL_CLR_ICINTSTAT
select IBM_EMAC_MAL_COMMON_ERR
select IBM_EMAC_NO_FLOW_CTRL if IBM_EMAC
select IBM_EMAC_MAL_CLR_ICINTSTAT if IBM_EMAC
select IBM_EMAC_MAL_COMMON_ERR if IBM_EMAC
config XILINX_VIRTEX
bool
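
The Kconfig hunks above (and the similar ones below) all apply the same fix: the EMAC sub-options are only force-selected while the IBM_EMAC driver itself is enabled, since an unconditional select would switch on symbols for a driver that is not being built. A hypothetical sketch of the construct; SOME_BOARD is a made-up symbol, the "select ... if ..." semantics are standard Kconfig:

config SOME_BOARD
	bool
	# The select only takes effect while IBM_EMAC is set; with the EMAC
	# driver disabled it is a no-op instead of dragging in dead options.
	select IBM_EMAC_RGMII if IBM_EMAC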


@@ -26,7 +26,7 @@ config BLUESTONE
select PCI_MSI
select PPC4xx_MSI
select PPC4xx_PCI_EXPRESS
select IBM_EMAC_RGMII
select IBM_EMAC_RGMII if IBM_EMAC
help
This option enables support for the APM APM821xx Evaluation board.
@@ -125,8 +125,8 @@ config CANYONLANDS
select PPC4xx_PCI_EXPRESS
select PCI_MSI
select PPC4xx_MSI
select IBM_EMAC_RGMII
select IBM_EMAC_ZMII
select IBM_EMAC_RGMII if IBM_EMAC
select IBM_EMAC_ZMII if IBM_EMAC
help
This option enables support for the AMCC PPC460EX evaluation board.
@@ -138,8 +138,8 @@ config GLACIER
select 460EX # Odd since it uses 460GT but the effects are the same
select PCI
select PPC4xx_PCI_EXPRESS
select IBM_EMAC_RGMII
select IBM_EMAC_ZMII
select IBM_EMAC_RGMII if IBM_EMAC
select IBM_EMAC_ZMII if IBM_EMAC
help
This option enables support for the AMCC PPC460GT evaluation board.
@@ -164,7 +164,7 @@ config EIGER
select 460SX
select PCI
select PPC4xx_PCI_EXPRESS
select IBM_EMAC_RGMII
select IBM_EMAC_RGMII if IBM_EMAC
help
This option enables support for the AMCC PPC460SX evaluation board.
@@ -213,7 +213,7 @@ config AKEBONO
select NETDEVICES
select ETHERNET
select NET_VENDOR_IBM
select IBM_EMAC_EMAC4
select IBM_EMAC_EMAC4 if IBM_EMAC
select USB if USB_SUPPORT
select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
@@ -291,54 +291,54 @@ config 440EP
bool
select PPC_FPU
select IBM440EP_ERR42
select IBM_EMAC_ZMII
select IBM_EMAC_ZMII if IBM_EMAC
config 440EPX
bool
select PPC_FPU
select IBM_EMAC_EMAC4
select IBM_EMAC_RGMII
select IBM_EMAC_ZMII
select IBM_EMAC_EMAC4 if IBM_EMAC
select IBM_EMAC_RGMII if IBM_EMAC
select IBM_EMAC_ZMII if IBM_EMAC
select USB_EHCI_BIG_ENDIAN_MMIO
select USB_EHCI_BIG_ENDIAN_DESC
config 440GRX
bool
select IBM_EMAC_EMAC4
select IBM_EMAC_RGMII
select IBM_EMAC_ZMII
select IBM_EMAC_EMAC4 if IBM_EMAC
select IBM_EMAC_RGMII if IBM_EMAC
select IBM_EMAC_ZMII if IBM_EMAC
config 440GP
bool
select IBM_EMAC_ZMII
select IBM_EMAC_ZMII if IBM_EMAC
config 440GX
bool
select IBM_EMAC_EMAC4
select IBM_EMAC_RGMII
select IBM_EMAC_ZMII #test only
select IBM_EMAC_TAH #test only
select IBM_EMAC_EMAC4 if IBM_EMAC
select IBM_EMAC_RGMII if IBM_EMAC
select IBM_EMAC_ZMII if IBM_EMAC #test only
select IBM_EMAC_TAH if IBM_EMAC #test only
config 440SP
bool
config 440SPe
bool
select IBM_EMAC_EMAC4
select IBM_EMAC_EMAC4 if IBM_EMAC
config 460EX
bool
select PPC_FPU
select IBM_EMAC_EMAC4
select IBM_EMAC_TAH
select IBM_EMAC_EMAC4 if IBM_EMAC
select IBM_EMAC_TAH if IBM_EMAC
config 460SX
bool
select PPC_FPU
select IBM_EMAC_EMAC4
select IBM_EMAC_RGMII
select IBM_EMAC_ZMII
select IBM_EMAC_TAH
select IBM_EMAC_EMAC4 if IBM_EMAC
select IBM_EMAC_RGMII if IBM_EMAC
select IBM_EMAC_ZMII if IBM_EMAC
select IBM_EMAC_TAH if IBM_EMAC
config 476FPE
bool
@@ -347,8 +347,8 @@ config 476FPE
config APM821xx
bool
select PPC_FPU
select IBM_EMAC_EMAC4
select IBM_EMAC_TAH
select IBM_EMAC_EMAC4 if IBM_EMAC
select IBM_EMAC_TAH if IBM_EMAC
config 476FPE_ERR46
depends on 476FPE


@@ -441,8 +441,4 @@ static struct platform_driver pmc_driver = {
.remove = pmc_remove
};
static int pmc_init(void)
{
return platform_driver_register(&pmc_driver);
}
device_initcall(pmc_init);
builtin_platform_driver(pmc_driver);
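
The hunk above replaces a hand-rolled device_initcall() wrapper with the builtin_platform_driver() helper for non-modular code. Roughly what the macro expands to, simplified from include/linux/platform_device.h and include/linux/device.h of this era:

#define builtin_platform_driver(__platform_driver) \
	builtin_driver(__platform_driver, platform_driver_register)

/* ...which generates the equivalent of the code being deleted: */
static int __init pmc_driver_init(void)
{
	return platform_driver_register(&pmc_driver);
}
device_initcall(pmc_driver_init);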


@@ -253,6 +253,8 @@ endif # PPC32
config PPC_QEMU_E500
bool "QEMU generic e500 platform"
select DEFAULT_UIMAGE
select E500
select PPC_E500MC if PPC64
help
This option enables support for running as a QEMU guest using
QEMU's generic e500 machine. This is not required if you're


@@ -220,7 +220,7 @@ define_machine(corenet_generic) {
*
* Likewise, problems have been seen with kexec when coreint is enabled.
*/
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)
.get_irq = mpic_get_irq,
#else
.get_irq = mpic_get_coreint_irq,
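
This and the following CONFIG_KEXEC -> CONFIG_KEXEC_CORE changes track the kexec split that kexec_file_load() support relies on: the common machinery is built when CONFIG_KEXEC_CORE is set, and both user-facing options select it, so arch hooks shared by the two syscalls must test KEXEC_CORE. A simplified sketch of the Kconfig relationship (mirroring the upstream symbol names, with prompts and extra selects trimmed):

config KEXEC_CORE
	bool

config KEXEC
	bool "kexec system call"
	select KEXEC_CORE

config KEXEC_FILE
	bool "kexec file based system call"
	select KEXEC_CORE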


@@ -349,13 +349,13 @@ struct smp_ops_t smp_85xx_ops = {
.cpu_disable = generic_cpu_disable,
.cpu_die = generic_cpu_die,
#endif
#if defined(CONFIG_KEXEC) && !defined(CONFIG_PPC64)
#if defined(CONFIG_KEXEC_CORE) && !defined(CONFIG_PPC64)
.give_timebase = smp_generic_give_timebase,
.take_timebase = smp_generic_take_timebase,
#endif
};
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC32
atomic_t kexec_down_cpus = ATOMIC_INIT(0);
@@ -458,7 +458,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC */
#endif /* CONFIG_KEXEC_CORE */
static void smp_85xx_basic_setup(int cpu_nr)
{
@@ -512,7 +512,7 @@ void __init mpc85xx_smp_init(void)
#endif
smp_ops = &smp_85xx_ops;
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif


@@ -130,6 +130,7 @@ config 8xx_CPU6
config 8xx_CPU15
bool "CPU15 Silicon Errata"
depends on !HUGETLB_PAGE
default y
help
This enables a workaround for erratum CPU15 on MPC8xx chips.


@@ -168,17 +168,6 @@ config MPIC_BROKEN_REGREAD
well, but enabling it uses about 8KB of memory to keep copies
of the register contents in software.
config IBMVIO
depends on PPC_PSERIES
bool
default y
config IBMEBUS
depends on PPC_PSERIES
bool "Support for GX bus based adapters"
help
Bus device driver for GX bus based adapters.
config EEH
bool
depends on (PPC_POWERNV || PPC_PSERIES) && PCI


@@ -34,6 +34,7 @@ config PPC_8xx
select FSL_SOC
select 8xx
select PPC_LIB_RHEAP
select SYS_SUPPORTS_HUGETLBFS
config 40x
bool "AMCC 40x"


@@ -17,10 +17,10 @@ config PPC_CELL_NATIVE
select PPC_CELL_COMMON
select MPIC
select PPC_IO_WORKAROUNDS
select IBM_EMAC_EMAC4
select IBM_EMAC_RGMII
select IBM_EMAC_ZMII #test only
select IBM_EMAC_TAH #test only
select IBM_EMAC_EMAC4 if IBM_EMAC
select IBM_EMAC_RGMII if IBM_EMAC
select IBM_EMAC_ZMII if IBM_EMAC #test only
select IBM_EMAC_TAH if IBM_EMAC #test only
default n
config PPC_IBM_CELL_BLADE
@@ -46,7 +46,6 @@ config SPU_FS
default m
depends on PPC_CELL
select SPU_BASE
select MEMORY_HOTPLUG
help
The SPU file system is used to access Synergistic Processing
Units on machines implementing the Broadband Processor


@@ -676,7 +676,7 @@ static ssize_t spu_stat_show(struct device *dev,
static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
struct crash_spu_info {
struct spu *spu;


@@ -263,7 +263,7 @@ static int ppc750_machine_check_exception(struct pt_regs *regs)
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
regs->msr |= MSR_RI;
regs->nip = entry->fixup;
regs->nip = extable_fixup(entry);
return 1;
}
return 0;


@@ -174,7 +174,7 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs)
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
regs->msr |= MSR_RI;
regs->nip = entry->fixup;
regs->nip = extable_fixup(entry);
return 1;
}
return 0;
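
The two hunks above follow from the exception-table rework called out in the highlights: entries now hold 32-bit offsets relative to themselves instead of absolute addresses, so the fixup address must be recomputed via extable_fixup() rather than read from entry->fixup. A minimal sketch of that layout, assuming the relative-extable scheme powerpc is switching to here:

struct exception_table_entry {
	int insn;	/* location of the faulting instruction */
	int fixup;	/* fixup code, as an offset relative to this field */
};

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	/* Convert the self-relative offset back into an absolute address. */
	return (unsigned long)&x->fixup + x->fixup;
}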


@@ -90,6 +90,7 @@ struct pmac_i2c_bus
int opened;
int polled; /* open mode */
struct platform_device *platform_dev;
struct lock_class_key lock_key;
/* ops */
int (*open)(struct pmac_i2c_bus *bus);
@@ -587,6 +588,7 @@ static void __init kw_i2c_add(struct pmac_i2c_host_kw *host,
bus->close = kw_i2c_close;
bus->xfer = kw_i2c_xfer;
mutex_init(&bus->mutex);
lockdep_set_class(&bus->mutex, &bus->lock_key);
if (controller == busnode)
bus->flags = pmac_i2c_multibus;
list_add(&bus->link, &pmac_i2c_busses);
@@ -815,6 +817,7 @@ static void __init pmu_i2c_probe(void)
bus->hostdata = bus + 1;
bus->xfer = pmu_i2c_xfer;
mutex_init(&bus->mutex);
lockdep_set_class(&bus->mutex, &bus->lock_key);
bus->flags = pmac_i2c_multibus;
list_add(&bus->link, &pmac_i2c_busses);
@@ -938,6 +941,7 @@ static void __init smu_i2c_probe(void)
bus->hostdata = bus + 1;
bus->xfer = smu_i2c_xfer;
mutex_init(&bus->mutex);
lockdep_set_class(&bus->mutex, &bus->lock_key);
bus->flags = 0;
list_add(&bus->link, &pmac_i2c_busses);
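
The lockdep_set_class() calls added above give every pmac_i2c_bus mutex its own lock class, keyed by the lock_class_key embedded in the bus structure, so holding one bus's mutex while taking another's is not reported as recursive locking. The general pattern, as a small sketch with made-up names (my_bus/my_bus_setup are illustrative):

struct my_bus {
	struct mutex		mutex;
	struct lock_class_key	lock_key;	/* one lockdep class per instance */
};

static void my_bus_setup(struct my_bus *bus)
{
	mutex_init(&bus->mutex);
	/* Without this, every instance shares the static class created by
	 * mutex_init(), and nested acquisitions across different busses
	 * look like self-deadlock to lockdep. */
	lockdep_set_class(&bus->mutex, &bus->lock_key);
}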


@@ -393,7 +393,7 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
/* Create PE */
ret = eeh_add_to_parent_pe(edev);
if (ret) {
pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%d)\n",
pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%x)\n",
__func__, hose->global_number, pdn->busno,
PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
return NULL;
@@ -1097,7 +1097,7 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
bus = eeh_pe_bus_get(pe);
if (!bus) {
pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
__func__, pe->phb->global_number, pe->addr);
return -EIO;
}


@@ -263,7 +263,7 @@ static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
/* Enable the bypass window */
top = roundup_pow_of_two(top);
dev_info(&npe->pdev->dev, "Enabling bypass for PE %d\n",
dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
npe->pe_number);
rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
npe->pe_number, npe->pe_number,


@@ -632,21 +632,11 @@ static void __init opal_dump_region_init(void)
"rc = %d\n", rc);
}
static void opal_pdev_init(struct device_node *opal_node,
const char *compatible)
static void opal_pdev_init(const char *compatible)
{
struct device_node *np;
for_each_child_of_node(opal_node, np)
if (of_device_is_compatible(np, compatible))
of_platform_device_create(np, NULL, NULL);
}
static void opal_i2c_create_devs(void)
{
struct device_node *np;
for_each_compatible_node(np, NULL, "ibm,opal-i2c")
for_each_compatible_node(np, NULL, compatible)
of_platform_device_create(np, NULL, NULL);
}
@@ -718,7 +708,7 @@ static int __init opal_init(void)
opal_hmi_handler_init();
/* Create i2c platform devices */
opal_i2c_create_devs();
opal_pdev_init("ibm,opal-i2c");
/* Setup a heatbeat thread if requested by OPAL */
opal_init_heartbeat();
@@ -753,12 +743,12 @@ static int __init opal_init(void)
}
/* Initialize platform devices: IPMI backend, PRD & flash interface */
opal_pdev_init(opal_node, "ibm,opal-ipmi");
opal_pdev_init(opal_node, "ibm,opal-flash");
opal_pdev_init(opal_node, "ibm,opal-prd");
opal_pdev_init("ibm,opal-ipmi");
opal_pdev_init("ibm,opal-flash");
opal_pdev_init("ibm,opal-prd");
/* Initialise platform device: oppanel interface */
opal_pdev_init(opal_node, "ibm,opal-oppanel");
opal_pdev_init("ibm,opal-oppanel");
/* Initialise OPAL kmsg dumper for flushing console on panic */
opal_kmsg_init();


@@ -83,7 +83,7 @@ void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV*/
printk("%spci %s: [PE# %.3d] %pV",
printk("%spci %s: [PE# %.2x] %pV",
level, pfix, pe->pe_number, &vaf);
va_end(args);
@@ -145,8 +145,8 @@ static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
*/
rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
if (rc != OPAL_SUCCESS)
pr_warn("%s: Error %lld unfreezing PHB#%d-PE#%d\n",
if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n",
__func__, rc, phb->hose->global_number, pe_no);
return &phb->ioda.pe_array[pe_no];
@@ -155,13 +155,13 @@ static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
{
if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
pr_warn("%s: Invalid PE %d on PHB#%x\n",
pr_warn("%s: Invalid PE %x on PHB#%x\n",
__func__, pe_no, phb->hose->global_number);
return;
}
if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
pr_debug("%s: PE %d was reserved on PHB#%x\n",
pr_debug("%s: PE %x was reserved on PHB#%x\n",
__func__, pe_no, phb->hose->global_number);
pnv_ioda_init_pe(phb, pe_no);
@@ -229,7 +229,7 @@ static int pnv_ioda2_init_m64(struct pnv_phb *phb)
else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
r->end -= (2 * phb->ioda.m64_segsize);
else
pr_warn(" Cannot strip M64 segment for reserved PE#%d\n",
pr_warn(" Cannot strip M64 segment for reserved PE#%x\n",
phb->ioda.reserved_pe_idx);
return 0;
@@ -291,7 +291,7 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb)
OPAL_M64_WINDOW_TYPE, index, base, 0,
PNV_IODA1_M64_SEGS * segsz);
if (rc != OPAL_SUCCESS) {
pr_warn(" Error %lld setting M64 PHB#%d-BAR#%d\n",
pr_warn(" Error %lld setting M64 PHB#%x-BAR#%d\n",
rc, phb->hose->global_number, index);
goto fail;
}
@@ -300,7 +300,7 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb)
OPAL_M64_WINDOW_TYPE, index,
OPAL_ENABLE_M64_SPLIT);
if (rc != OPAL_SUCCESS) {
pr_warn(" Error %lld enabling M64 PHB#%d-BAR#%d\n",
pr_warn(" Error %lld enabling M64 PHB#%x-BAR#%d\n",
rc, phb->hose->global_number, index);
goto fail;
}
@@ -316,7 +316,7 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb)
else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
r->end -= (2 * phb->ioda.m64_segsize);
else
WARN(1, "Wrong reserved PE#%d on PHB#%d\n",
WARN(1, "Wrong reserved PE#%x on PHB#%x\n",
phb->ioda.reserved_pe_idx, phb->hose->global_number);
return 0;
@@ -414,7 +414,7 @@ static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
pe->pe_number / PNV_IODA1_M64_SEGS,
pe->pe_number % PNV_IODA1_M64_SEGS);
if (rc != OPAL_SUCCESS)
pr_warn("%s: Error %lld mapping M64 for PHB#%d-PE#%d\n",
pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n",
__func__, rc, phb->hose->global_number,
pe->pe_number);
}
@@ -941,14 +941,14 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
pe->mve_number = pe->pe_number;
rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
if (rc != OPAL_SUCCESS) {
pe_err(pe, "OPAL error %ld setting up MVE %d\n",
pe_err(pe, "OPAL error %ld setting up MVE %x\n",
rc, pe->mve_number);
pe->mve_number = -1;
} else {
rc = opal_pci_set_mve_enable(phb->opal_id,
pe->mve_number, OPAL_ENABLE_MVE);
if (rc) {
pe_err(pe, "OPAL error %ld enabling MVE %d\n",
pe_err(pe, "OPAL error %ld enabling MVE %x\n",
rc, pe->mve_number);
pe->mve_number = -1;
}
@@ -1159,10 +1159,10 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
pe->rid = bus->busn_res.start << 8;
if (all)
pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
pe_info(pe, "Secondary bus %d..%d associated with PE#%x\n",
bus->busn_res.start, bus->busn_res.end, pe->pe_number);
else
pe_info(pe, "Secondary bus %d associated with PE#%d\n",
pe_info(pe, "Secondary bus %d associated with PE#%x\n",
bus->busn_res.start, pe->pe_number);
if (pnv_ioda_configure_pe(phb, pe)) {
@@ -1213,7 +1213,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
* peer NPU.
*/
dev_info(&npu_pdev->dev,
"Associating to existing PE %d\n", pe_num);
"Associating to existing PE %x\n", pe_num);
pci_dev_get(npu_pdev);
npu_pdn = pci_get_pdn(npu_pdev);
rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
@@ -1539,7 +1539,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
pci_iov_virtfn_devfn(pdev, vf_index);
pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
hose->global_number, pdev->bus->number,
PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
@@ -2844,7 +2844,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
pnv_set_msi_irq_chip(phb, virq);
pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
" address=%x_%08x data=%x PE# %d\n",
" address=%x_%08x data=%x PE# %x\n",
pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
msg->address_hi, msg->address_lo, data, pe->pe_number);
@@ -2993,7 +2993,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
rc = opal_pci_map_pe_mmio_window(phb->opal_id,
pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
if (rc != OPAL_SUCCESS) {
pr_err("%s: Error %lld mapping IO segment#%d to PE#%d\n",
pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n",
__func__, rc, index, pe->pe_number);
break;
}
@@ -3017,7 +3017,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
rc = opal_pci_map_pe_mmio_window(phb->opal_id,
pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
if (rc != OPAL_SUCCESS) {
pr_err("%s: Error %lld mapping M32 segment#%d to PE#%d",
pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x",
__func__, rc, index, pe->pe_number);
break;
}
@@ -3281,7 +3281,7 @@ static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
pnv_pci_ioda2_setup_dma_pe(phb, pe);
break;
default:
pr_warn("%s: No DMA for PHB#%d (type %d)\n",
pr_warn("%s: No DMA for PHB#%x (type %d)\n",
__func__, phb->hose->global_number, phb->type);
}
}


@@ -234,7 +234,7 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
int i;
data = (struct OpalIoP7IOCPhbErrorData *)common;
pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n",
pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
hose->global_number, be32_to_cpu(common->version));
if (data->brdgCtl)
@@ -326,7 +326,7 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
int i;
data = (struct OpalIoPhb3ErrorData*)common;
pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
hose->global_number, be32_to_cpu(common->version));
if (data->brdgCtl)
pr_info("brdgCtl: %08x\n",
@@ -516,7 +516,7 @@ static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
}
}
pr_devel(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
(pdn->busno << 8) | (pdn->devfn), pe_no, fstate);
/* Clear the frozen state if applicable */


@@ -174,7 +174,7 @@ static void pnv_shutdown(void)
opal_shutdown();
}
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
static void pnv_kexec_wait_secondaries_down(void)
{
int my_cpu, i, notified = -1;
@@ -245,7 +245,7 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
}
}
#endif /* CONFIG_KEXEC */
#endif /* CONFIG_KEXEC_CORE */
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static unsigned long pnv_memory_block_size(void)
@@ -311,7 +311,7 @@ define_machine(powernv) {
.machine_shutdown = pnv_shutdown,
.power_save = NULL,
.calibrate_decr = generic_calibrate_decr,
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
.kexec_cpu_down = pnv_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE


@@ -250,7 +250,7 @@ static int __init ps3_probe(void)
return 1;
}
#if defined(CONFIG_KEXEC)
#if defined(CONFIG_KEXEC_CORE)
static void ps3_kexec_cpu_down(int crash_shutdown, int secondary)
{
int cpu = smp_processor_id();
@@ -276,7 +276,7 @@ define_machine(ps3) {
.progress = ps3_progress,
.restart = ps3_restart,
.halt = ps3_halt,
#if defined(CONFIG_KEXEC)
#if defined(CONFIG_KEXEC_CORE)
.kexec_cpu_down = ps3_kexec_cpu_down,
#endif
};


@@ -127,3 +127,14 @@ config HV_PERF_CTRS
systems. 24x7 is available on Power 8 systems.
If unsure, select Y.
config IBMVIO
depends on PPC_PSERIES
bool
default y
config IBMEBUS
depends on PPC_PSERIES && !CPU_LITTLE_ENDIAN
bool "Support for GX bus based adapters"
help
Bus device driver for GX bus based adapters.


@@ -8,7 +8,7 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
pci.o pci_dlpar.o eeh_pseries.o msi.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_KEXEC) += kexec.o
obj-$(CONFIG_KEXEC_CORE) += kexec.o
obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o
@@ -21,6 +21,8 @@ obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_DTL) += dtl.o
obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_IBMEBUS) += ibmebus.o
ifeq ($(CONFIG_PPC_PSERIES),y)
obj-$(CONFIG_SUSPEND) += suspend.o


@@ -41,6 +41,8 @@
#include <linux/memory.h>
#include <asm/plpar_wrappers.h>
#include "pseries.h"
#define CMM_DRIVER_VERSION "1.0.0"
#define CMM_DEFAULT_DELAY 1
#define CMM_HOTPLUG_DELAY 5
@@ -109,6 +111,38 @@ static int hotplug_occurred; /* protected by the hotplug mutex */
static struct task_struct *cmm_thread_ptr;
static long plpar_page_set_loaned(unsigned long vpa)
{
unsigned long cmo_page_sz = cmo_get_page_size();
long rc = 0;
int i;
for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
vpa + i - cmo_page_sz, 0);
return rc;
}
static long plpar_page_set_active(unsigned long vpa)
{
unsigned long cmo_page_sz = cmo_get_page_size();
long rc = 0;
int i;
for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
vpa + i - cmo_page_sz, 0);
return rc;
}
/**
* cmm_alloc_pages - Allocate pages and mark them as loaned
* @nr: number of pages to allocate


@@ -418,84 +418,136 @@ void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
}
}
static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
char *arg;
arg = strsep(cmd, " ");
if (!arg)
return -EINVAL;
if (sysfs_streq(arg, "memory")) {
hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
} else if (sysfs_streq(arg, "cpu")) {
hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
} else {
pr_err("Invalid resource specified.\n");
return -EINVAL;
}
return 0;
}
static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
char *arg;
arg = strsep(cmd, " ");
if (!arg)
return -EINVAL;
if (sysfs_streq(arg, "add")) {
hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
} else if (sysfs_streq(arg, "remove")) {
hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
} else {
pr_err("Invalid action specified.\n");
return -EINVAL;
}
return 0;
}
static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
char *arg;
u32 count, index;
arg = strsep(cmd, " ");
if (!arg)
return -EINVAL;
if (sysfs_streq(arg, "index")) {
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
arg = strsep(cmd, " ");
if (!arg) {
pr_err("No DRC Index specified.\n");
return -EINVAL;
}
if (kstrtou32(arg, 0, &index)) {
pr_err("Invalid DRC Index specified.\n");
return -EINVAL;
}
hp_elog->_drc_u.drc_index = cpu_to_be32(index);
} else if (sysfs_streq(arg, "count")) {
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
arg = strsep(cmd, " ");
if (!arg) {
pr_err("No DRC count specified.\n");
return -EINVAL;
}
if (kstrtou32(arg, 0, &count)) {
pr_err("Invalid DRC count specified.\n");
return -EINVAL;
}
hp_elog->_drc_u.drc_count = cpu_to_be32(count);
} else {
pr_err("Invalid id_type specified.\n");
return -EINVAL;
}
return 0;
}
static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
const char *buf, size_t count)
{
struct pseries_hp_errorlog *hp_elog;
struct completion hotplug_done;
const char *arg;
char *argbuf;
char *args;
int rc;
args = argbuf = kstrdup(buf, GFP_KERNEL);
hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
if (!hp_elog) {
rc = -ENOMEM;
goto dlpar_store_out;
if (!hp_elog || !argbuf) {
pr_info("Could not allocate resources for DLPAR operation\n");
kfree(argbuf);
kfree(hp_elog);
return -ENOMEM;
}
/* Parse out the request from the user, this will be in the form
/*
* Parse out the request from the user, this will be in the form:
* <resource> <action> <id_type> <id>
*/
arg = buf;
if (!strncmp(arg, "memory", 6)) {
hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
arg += strlen("memory ");
} else if (!strncmp(arg, "cpu", 3)) {
hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
arg += strlen("cpu ");
} else {
pr_err("Invalid resource specified: \"%s\"\n", buf);
rc = -EINVAL;
rc = dlpar_parse_resource(&args, hp_elog);
if (rc)
goto dlpar_store_out;
}
if (!strncmp(arg, "add", 3)) {
hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
arg += strlen("add ");
} else if (!strncmp(arg, "remove", 6)) {
hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
arg += strlen("remove ");
} else {
pr_err("Invalid action specified: \"%s\"\n", buf);
rc = -EINVAL;
rc = dlpar_parse_action(&args, hp_elog);
if (rc)
goto dlpar_store_out;
}
if (!strncmp(arg, "index", 5)) {
u32 index;
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
arg += strlen("index ");
if (kstrtou32(arg, 0, &index)) {
rc = -EINVAL;
pr_err("Invalid drc_index specified: \"%s\"\n", buf);
goto dlpar_store_out;
}
hp_elog->_drc_u.drc_index = cpu_to_be32(index);
} else if (!strncmp(arg, "count", 5)) {
u32 count;
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
arg += strlen("count ");
if (kstrtou32(arg, 0, &count)) {
rc = -EINVAL;
pr_err("Invalid count specified: \"%s\"\n", buf);
goto dlpar_store_out;
}
hp_elog->_drc_u.drc_count = cpu_to_be32(count);
} else {
pr_err("Invalid id_type specified: \"%s\"\n", buf);
rc = -EINVAL;
rc = dlpar_parse_id_type(&args, hp_elog);
if (rc)
goto dlpar_store_out;
}
init_completion(&hotplug_done);
queue_hotplug_event(hp_elog, &hotplug_done, &rc);
wait_for_completion(&hotplug_done);
dlpar_store_out:
kfree(argbuf);
kfree(hp_elog);
if (rc)
pr_err("Could not handle DLPAR request \"%s\"\n", buf);
return rc ? rc : count;
}
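
For reference, these parse helpers back the pseries DLPAR sysfs interface and keep the same one-line request format the old open-coded parser accepted, "<resource> <action> <id_type> <id>"; an illustrative request, assuming the /sys/kernel/dlpar attribute exposed by this code:

echo "memory add count 16" > /sys/kernel/dlpar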


@@ -270,7 +270,7 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
eeh_add_flag(EEH_ENABLED);
eeh_add_to_parent_pe(edev);
pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%d-PE#%x\n",
pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%x-PE#%x\n",
__func__, pdn->busno, PCI_SLOT(pdn->devfn),
PCI_FUNC(pdn->devfn), pe.phb->global_number,
pe.addr);
@@ -371,7 +371,7 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
pe->config_addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid), 0);
if (ret) {
pr_warn("%s: Failed to get address for PHB#%d-PE#%x\n",
pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
__func__, pe->phb->global_number, pe->config_addr);
return 0;
}
@@ -384,7 +384,7 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
pe->config_addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid), 0);
if (ret) {
pr_warn("%s: Failed to get address for PHB#%d-PE#%x\n",
pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
__func__, pe->phb->global_number, pe->config_addr);
return 0;
}
@@ -653,7 +653,7 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
rtas_busy_delay(ret);
}
pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
__func__, pe->phb->global_number, pe->addr, ret);
return ret;
}


@@ -472,12 +472,15 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
/* Validate that there are enough LMBs to satisfy the request */
for (i = 0; i < num_lmbs; i++) {
if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
if (lmb_is_removable(&lmbs[i]))
lmbs_available++;
}
if (lmbs_available < lmbs_to_remove)
if (lmbs_available < lmbs_to_remove) {
pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
lmbs_available, lmbs_to_remove);
return -EINVAL;
}
for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
rc = dlpar_remove_lmb(&lmbs[i]);


@@ -0,0 +1,469 @@
/*
* IBM PowerPC IBM eBus Infrastructure Support.
*
* Copyright (c) 2005 IBM Corporation
* Joachim Fenkes <fenkes@de.ibm.com>
* Heiko J Schick <schickhj@de.ibm.com>
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/console.h>
#include <linux/kobject.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/of_platform.h>
#include <asm/ibmebus.h>
static struct device ibmebus_bus_device = { /* fake "parent" device */
.init_name = "ibmebus",
};
struct bus_type ibmebus_bus_type;
/* These devices will automatically be added to the bus during init */
static const struct of_device_id ibmebus_matches[] __initconst = {
{ .compatible = "IBM,lhca" },
{ .compatible = "IBM,lhea" },
{},
};
static void *ibmebus_alloc_coherent(struct device *dev,
size_t size,
dma_addr_t *dma_handle,
gfp_t flag,
unsigned long attrs)
{
void *mem;
mem = kmalloc(size, flag);
*dma_handle = (dma_addr_t)mem;
return mem;
}
static void ibmebus_free_coherent(struct device *dev,
size_t size, void *vaddr,
dma_addr_t dma_handle,
unsigned long attrs)
{
kfree(vaddr);
}
static dma_addr_t ibmebus_map_page(struct device *dev,
struct page *page,
unsigned long offset,
size_t size,
enum dma_data_direction direction,
unsigned long attrs)
{
return (dma_addr_t)(page_address(page) + offset);
}
static void ibmebus_unmap_page(struct device *dev,
dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction,
unsigned long attrs)
{
return;
}
static int ibmebus_map_sg(struct device *dev,
struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nents, i) {
sg->dma_address = (dma_addr_t) sg_virt(sg);
sg->dma_length = sg->length;
}
return nents;
}
static void ibmebus_unmap_sg(struct device *dev,
struct scatterlist *sg,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
return;
}
static int ibmebus_dma_supported(struct device *dev, u64 mask)
{
return mask == DMA_BIT_MASK(64);
}
static u64 ibmebus_dma_get_required_mask(struct device *dev)
{
return DMA_BIT_MASK(64);
}
static struct dma_map_ops ibmebus_dma_ops = {
.alloc = ibmebus_alloc_coherent,
.free = ibmebus_free_coherent,
.map_sg = ibmebus_map_sg,
.unmap_sg = ibmebus_unmap_sg,
.dma_supported = ibmebus_dma_supported,
.get_required_mask = ibmebus_dma_get_required_mask,
.map_page = ibmebus_map_page,
.unmap_page = ibmebus_unmap_page,
};
static int ibmebus_match_path(struct device *dev, void *data)
{
struct device_node *dn = to_platform_device(dev)->dev.of_node;
return (dn->full_name &&
(strcasecmp((char *)data, dn->full_name) == 0));
}
static int ibmebus_match_node(struct device *dev, void *data)
{
return to_platform_device(dev)->dev.of_node == data;
}
static int ibmebus_create_device(struct device_node *dn)
{
struct platform_device *dev;
int ret;
dev = of_device_alloc(dn, NULL, &ibmebus_bus_device);
if (!dev)
return -ENOMEM;
dev->dev.bus = &ibmebus_bus_type;
dev->dev.archdata.dma_ops = &ibmebus_dma_ops;
ret = of_device_add(dev);
if (ret)
platform_device_put(dev);
return ret;
}
static int ibmebus_create_devices(const struct of_device_id *matches)
{
struct device_node *root, *child;
struct device *dev;
int ret = 0;
root = of_find_node_by_path("/");
for_each_child_of_node(root, child) {
if (!of_match_node(matches, child))
continue;
dev = bus_find_device(&ibmebus_bus_type, NULL, child,
ibmebus_match_node);
if (dev) {
put_device(dev);
continue;
}
ret = ibmebus_create_device(child);
if (ret) {
printk(KERN_ERR "%s: failed to create device (%i)",
__func__, ret);
of_node_put(child);
break;
}
}
of_node_put(root);
return ret;
}
int ibmebus_register_driver(struct platform_driver *drv)
{
/* If the driver uses devices that ibmebus doesn't know, add them */
ibmebus_create_devices(drv->driver.of_match_table);
drv->driver.bus = &ibmebus_bus_type;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL(ibmebus_register_driver);
void ibmebus_unregister_driver(struct platform_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(ibmebus_unregister_driver);
int ibmebus_request_irq(u32 ist, irq_handler_t handler,
unsigned long irq_flags, const char *devname,
void *dev_id)
{
unsigned int irq = irq_create_mapping(NULL, ist);
if (!irq)
return -EINVAL;
return request_irq(irq, handler, irq_flags, devname, dev_id);
}
EXPORT_SYMBOL(ibmebus_request_irq);
void ibmebus_free_irq(u32 ist, void *dev_id)
{
unsigned int irq = irq_find_mapping(NULL, ist);
free_irq(irq, dev_id);
irq_dispose_mapping(irq);
}
EXPORT_SYMBOL(ibmebus_free_irq);
static char *ibmebus_chomp(const char *in, size_t count)
{
char *out = kmalloc(count + 1, GFP_KERNEL);
if (!out)
return NULL;
memcpy(out, in, count);
out[count] = '\0';
if (out[count - 1] == '\n')
out[count - 1] = '\0';
return out;
}
static ssize_t ibmebus_store_probe(struct bus_type *bus,
const char *buf, size_t count)
{
struct device_node *dn = NULL;
struct device *dev;
char *path;
ssize_t rc = 0;
path = ibmebus_chomp(buf, count);
if (!path)
return -ENOMEM;
dev = bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path);
if (dev) {
put_device(dev);
printk(KERN_WARNING "%s: %s has already been probed\n",
__func__, path);
rc = -EEXIST;
goto out;
}
if ((dn = of_find_node_by_path(path))) {
rc = ibmebus_create_device(dn);
of_node_put(dn);
} else {
printk(KERN_WARNING "%s: no such device node: %s\n",
__func__, path);
rc = -ENODEV;
}
out:
kfree(path);
if (rc)
return rc;
return count;
}
static BUS_ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe);
static ssize_t ibmebus_store_remove(struct bus_type *bus,
const char *buf, size_t count)
{
struct device *dev;
char *path;
path = ibmebus_chomp(buf, count);
if (!path)
return -ENOMEM;
if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path))) {
of_device_unregister(to_platform_device(dev));
put_device(dev);
kfree(path);
return count;
} else {
printk(KERN_WARNING "%s: %s not on the bus\n",
__func__, path);
kfree(path);
return -ENODEV;
}
}
static BUS_ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove);
static struct attribute *ibmbus_bus_attrs[] = {
&bus_attr_probe.attr,
&bus_attr_remove.attr,
NULL,
};
ATTRIBUTE_GROUPS(ibmbus_bus);
static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv)
{
const struct of_device_id *matches = drv->of_match_table;
if (!matches)
return 0;
return of_match_device(matches, dev) != NULL;
}
static int ibmebus_bus_device_probe(struct device *dev)
{
int error = -ENODEV;
struct platform_driver *drv;
struct platform_device *of_dev;
drv = to_platform_driver(dev->driver);
of_dev = to_platform_device(dev);
if (!drv->probe)
return error;
of_dev_get(of_dev);
if (of_driver_match_device(dev, dev->driver))
error = drv->probe(of_dev);
if (error)
of_dev_put(of_dev);
return error;
}
static int ibmebus_bus_device_remove(struct device *dev)
{
struct platform_device *of_dev = to_platform_device(dev);
struct platform_driver *drv = to_platform_driver(dev->driver);
if (dev->driver && drv->remove)
drv->remove(of_dev);
return 0;
}
static void ibmebus_bus_device_shutdown(struct device *dev)
{
struct platform_device *of_dev = to_platform_device(dev);
struct platform_driver *drv = to_platform_driver(dev->driver);
if (dev->driver && drv->shutdown)
drv->shutdown(of_dev);
}
/*
* ibmebus_bus_device_attrs
*/
static ssize_t devspec_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ofdev;
ofdev = to_platform_device(dev);
return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
}
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ofdev;
ofdev = to_platform_device(dev);
return sprintf(buf, "%s\n", ofdev->dev.of_node->name);
}
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2);
buf[len] = '\n';
buf[len+1] = 0;
return len+1;
}
static struct device_attribute ibmebus_bus_device_attrs[] = {
__ATTR_RO(devspec),
__ATTR_RO(name),
__ATTR_RO(modalias),
__ATTR_NULL
};
struct bus_type ibmebus_bus_type = {
.name = "ibmebus",
.uevent = of_device_uevent_modalias,
.bus_groups = ibmbus_bus_groups,
.match = ibmebus_bus_bus_match,
.probe = ibmebus_bus_device_probe,
.remove = ibmebus_bus_device_remove,
.shutdown = ibmebus_bus_device_shutdown,
.dev_attrs = ibmebus_bus_device_attrs,
};
EXPORT_SYMBOL(ibmebus_bus_type);
static int __init ibmebus_bus_init(void)
{
int err;
printk(KERN_INFO "IBM eBus Device Driver\n");
err = bus_register(&ibmebus_bus_type);
if (err) {
printk(KERN_ERR "%s: failed to register IBM eBus.\n",
__func__);
return err;
}
err = device_register(&ibmebus_bus_device);
if (err) {
printk(KERN_WARNING "%s: device_register returned %i\n",
__func__, err);
bus_unregister(&ibmebus_bus_type);
return err;
}
err = ibmebus_create_devices(ibmebus_matches);
if (err) {
device_unregister(&ibmebus_bus_device);
bus_unregister(&ibmebus_bus_type);
return err;
}
return 0;
}
postcore_initcall(ibmebus_bus_init);


@@ -221,7 +221,7 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
return -1;
}
static void pSeries_lpar_hptab_clear(void)
static void manual_hpte_clear_all(void)
{
unsigned long size_bytes = 1UL << ppc64_pft_size;
unsigned long hpte_count = size_bytes >> 4;
@@ -249,6 +249,26 @@ static void pSeries_lpar_hptab_clear(void)
&(ptes[j].pteh), &(ptes[j].ptel));
}
}
}
static int hcall_hpte_clear_all(void)
{
int rc;
do {
rc = plpar_hcall_norets(H_CLEAR_HPT);
} while (rc == H_CONTINUE);
return rc;
}
static void pseries_hpte_clear_all(void)
{
int rc;
rc = hcall_hpte_clear_all();
if (rc != H_SUCCESS)
manual_hpte_clear_all();
#ifdef __LITTLE_ENDIAN__
/*
@@ -598,7 +618,7 @@ void __init hpte_init_pseries(void)
mmu_hash_ops.hpte_remove = pSeries_lpar_hpte_remove;
mmu_hash_ops.hpte_removebolted = pSeries_lpar_hpte_removebolted;
mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
mmu_hash_ops.hpte_clear_all = pSeries_lpar_hptab_clear;
mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all;
mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}


@@ -37,6 +37,7 @@
#include <asm/mmu.h>
#include <asm/machdep.h>
#include "pseries.h"
/*
* This isn't a module but we expose that to userspace


@@ -79,4 +79,23 @@ extern struct pci_controller_ops pseries_pci_controller_ops;
unsigned long pseries_memory_block_size(void);
extern int CMO_PrPSP;
extern int CMO_SecPSP;
extern unsigned long CMO_PageSize;
static inline int cmo_get_primary_psp(void)
{
return CMO_PrPSP;
}
static inline int cmo_get_secondary_psp(void)
{
return CMO_SecPSP;
}
static inline unsigned long cmo_get_page_size(void)
{
return CMO_PageSize;
}
#endif /* _PSERIES_PSERIES_H */


@@ -367,7 +367,7 @@ void pseries_disable_reloc_on_exc(void)
}
EXPORT_SYMBOL(pseries_disable_reloc_on_exc);
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
static void pSeries_machine_kexec(struct kimage *image)
{
if (firmware_has_feature(FW_FEATURE_SET_MODE))
@@ -725,7 +725,7 @@ define_machine(pseries) {
.progress = rtas_progress,
.system_reset_exception = pSeries_system_reset_exception,
.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
.machine_kexec = pSeries_machine_kexec,
.kexec_cpu_down = pseries_kexec_cpu_down,
#endif

The diff for one further file has been suppressed because it is too large.