Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux
Pull powerpc updates from Michael Ellerman:
 "Here's a first pull request for powerpc updates for 3.18.

  The bulk of the additions are for the "cxl" driver, for IBM's
  Coherent Accelerator Processor Interface (CAPI). Most of it's in
  drivers/misc, which Greg & Arnd maintain, Greg said he was happy for
  us to take it through our tree.

  There's the usual minor cleanups and fixes, including a bit of noise
  in drivers from some of those. A bunch of updates to our EEH code,
  which has been getting more testing. Several nice speedups from
  Anton, including 20% in clear_page(). And a bunch of updates for
  freescale from Scott"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux: (130 commits)
  cxl: Fix afu_read() not doing finish_wait() on signal or non-blocking
  cxl: Add documentation for userspace APIs
  cxl: Add driver to Kbuild and Makefiles
  cxl: Add userspace header file
  cxl: Driver code for powernv PCIe based cards for userspace access
  cxl: Add base builtin support
  powerpc/mm: Add hooks for cxl
  powerpc/opal: Add PHB to cxl mode call
  powerpc/mm: Add new hash_page_mm()
  powerpc/powerpc: Add new PCIe functions for allocating cxl interrupts
  cxl: Add new header for call backs and structs
  powerpc/powernv: Split out set MSI IRQ chip code
  powerpc/mm: Export mmu_kernel_ssize and mmu_linear_psize
  powerpc/msi: Improve IRQ bitmap allocator
  powerpc/cell: Make spu_flush_all_slbs() generic
  powerpc/cell: Move data segment faulting code out of cell platform
  powerpc/cell: Move spu_handle_mm_fault() out of cell platform
  powerpc/pseries: Use new defines when calling H_SET_MODE
  powerpc: Update contact info in Documentation files
  powerpc/perf/hv-24x7: Simplify catalog_read()
  ...
@@ -93,6 +93,9 @@ obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o
ifeq ($(CONFIG_PPC32),y)
obj-$(CONFIG_MODULES) += ppc_ksyms_32.o
endif
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
@@ -13,6 +13,7 @@

#include <linux/crash_dump.h>
#include <linux/bootmem.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
@@ -106,10 +106,14 @@ int __init swiotlb_setup_bus_notifier(void)
	return 0;
}

void swiotlb_detect_4g(void)
void __init swiotlb_detect_4g(void)
{
	if ((memblock_end_of_DRAM() - 1) > 0xffffffff)
	if ((memblock_end_of_DRAM() - 1) > 0xffffffff) {
		ppc_swiotlb_enable = 1;
#ifdef CONFIG_ZONE_DMA32
		limit_zone_pfn(ZONE_DMA32, (1ULL << 32) >> PAGE_SHIFT);
#endif
	}
}

static int __init swiotlb_late_init(void)
@@ -15,6 +15,7 @@
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
#include <asm/swiotlb.h>

/*
 * Generic direct DMA implementation
@@ -25,6 +26,18 @@
 * default the offset is PCI_DRAM_OFFSET.
 */

static u64 __maybe_unused get_pfn_limit(struct device *dev)
{
	u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
	struct dev_archdata __maybe_unused *sd = &dev->archdata;

#ifdef CONFIG_SWIOTLB
	if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops)
		pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif

	return pfn;
}

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag,
@@ -40,6 +53,26 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
#else
	struct page *page;
	int node = dev_to_node(dev);
	u64 pfn = get_pfn_limit(dev);
	int zone;

	zone = dma_pfn_limit_to_zone(pfn);
	if (zone < 0) {
		dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
			__func__, pfn);
		return NULL;
	}

	switch (zone) {
	case ZONE_DMA:
		flag |= GFP_DMA;
		break;
#ifdef CONFIG_ZONE_DMA32
	case ZONE_DMA32:
		flag |= GFP_DMA32;
		break;
#endif
	};

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);
@@ -202,6 +235,7 @@ int __dma_set_mask(struct device *dev, u64 dma_mask)
	*dev->dma_mask = dma_mask;
	return 0;
}

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (ppc_md.dma_set_mask)
@@ -210,13 +244,10 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
}
EXPORT_SYMBOL(dma_set_mask);

u64 dma_get_required_mask(struct device *dev)
u64 __dma_get_required_mask(struct device *dev)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

@@ -225,6 +256,14 @@ u64 dma_get_required_mask(struct device *dev)

	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}

u64 dma_get_required_mask(struct device *dev)
{
	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	return __dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init dma_init(void)
@@ -117,7 +117,7 @@ static DEFINE_MUTEX(eeh_dev_mutex);
|
||||
* not dynamically alloced, so that it ends up in RMO where RTAS
|
||||
* can access it.
|
||||
*/
|
||||
#define EEH_PCI_REGS_LOG_LEN 4096
|
||||
#define EEH_PCI_REGS_LOG_LEN 8192
|
||||
static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];
|
||||
|
||||
/*
|
||||
@@ -148,16 +148,12 @@ static int __init eeh_setup(char *str)
|
||||
}
|
||||
__setup("eeh=", eeh_setup);
|
||||
|
||||
/**
|
||||
* eeh_gather_pci_data - Copy assorted PCI config space registers to buff
|
||||
* @edev: device to report data for
|
||||
* @buf: point to buffer in which to log
|
||||
* @len: amount of room in buffer
|
||||
*
|
||||
* This routine captures assorted PCI configuration space data,
|
||||
* and puts them into a buffer for RTAS error logging.
|
||||
/*
|
||||
* This routine captures assorted PCI configuration space data
|
||||
* for the indicated PCI device, and puts them into a buffer
|
||||
* for RTAS error logging.
|
||||
*/
|
||||
static size_t eeh_gather_pci_data(struct eeh_dev *edev, char *buf, size_t len)
|
||||
static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
|
||||
{
|
||||
struct device_node *dn = eeh_dev_to_of_node(edev);
|
||||
u32 cfg;
|
||||
@@ -255,6 +251,19 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char *buf, size_t len)
|
||||
return n;
|
||||
}
|
||||
|
||||
static void *eeh_dump_pe_log(void *data, void *flag)
|
||||
{
|
||||
struct eeh_pe *pe = data;
|
||||
struct eeh_dev *edev, *tmp;
|
||||
size_t *plen = flag;
|
||||
|
||||
eeh_pe_for_each_dev(pe, edev, tmp)
|
||||
*plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen,
|
||||
EEH_PCI_REGS_LOG_LEN - *plen);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* eeh_slot_error_detail - Generate combined log including driver log and error log
|
||||
* @pe: EEH PE
|
||||
@@ -268,7 +277,6 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char *buf, size_t len)
|
||||
void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
|
||||
{
|
||||
size_t loglen = 0;
|
||||
struct eeh_dev *edev, *tmp;
|
||||
|
||||
/*
|
||||
* When the PHB is fenced or dead, it's pointless to collect
|
||||
@@ -286,10 +294,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
|
||||
eeh_pe_restore_bars(pe);
|
||||
|
||||
pci_regs_buf[0] = 0;
|
||||
eeh_pe_for_each_dev(pe, edev, tmp) {
|
||||
loglen += eeh_gather_pci_data(edev, pci_regs_buf + loglen,
|
||||
EEH_PCI_REGS_LOG_LEN - loglen);
|
||||
}
|
||||
eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
|
||||
}
|
||||
|
||||
eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
|
||||
@@ -410,7 +415,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
|
||||
}
|
||||
dn = eeh_dev_to_of_node(edev);
|
||||
dev = eeh_dev_to_pci_dev(edev);
|
||||
pe = edev->pe;
|
||||
pe = eeh_dev_to_pe(edev);
|
||||
|
||||
/* Access to IO BARs might get this far and still not want checking. */
|
||||
if (!pe) {
|
||||
@@ -542,17 +547,16 @@ EXPORT_SYMBOL_GPL(eeh_dev_check_failure);
|
||||
|
||||
/**
|
||||
* eeh_check_failure - Check if all 1's data is due to EEH slot freeze
|
||||
* @token: I/O token, should be address in the form 0xA....
|
||||
* @val: value, should be all 1's (XXX why do we need this arg??)
|
||||
* @token: I/O address
|
||||
*
|
||||
* Check for an EEH failure at the given token address. Call this
|
||||
* Check for an EEH failure at the given I/O address. Call this
|
||||
* routine if the result of a read was all 0xff's and you want to
|
||||
* find out if this is due to an EEH slot freeze event. This routine
|
||||
* find out if this is due to an EEH slot freeze event. This routine
|
||||
* will query firmware for the EEH status.
|
||||
*
|
||||
* Note this routine is safe to call in an interrupt context.
|
||||
*/
|
||||
unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
|
||||
int eeh_check_failure(const volatile void __iomem *token)
|
||||
{
|
||||
unsigned long addr;
|
||||
struct eeh_dev *edev;
|
||||
@@ -562,13 +566,11 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
|
||||
edev = eeh_addr_cache_get_dev(addr);
|
||||
if (!edev) {
|
||||
eeh_stats.no_device++;
|
||||
return val;
|
||||
return 0;
|
||||
}
|
||||
|
||||
eeh_dev_check_failure(edev);
|
||||
return val;
|
||||
return eeh_dev_check_failure(edev);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(eeh_check_failure);
|
||||
|
||||
|
||||
@@ -582,25 +584,51 @@ EXPORT_SYMBOL(eeh_check_failure);
|
||||
*/
|
||||
int eeh_pci_enable(struct eeh_pe *pe, int function)
|
||||
{
|
||||
int rc, flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
|
||||
int active_flag, rc;
|
||||
|
||||
/*
|
||||
* pHyp doesn't allow to enable IO or DMA on unfrozen PE.
|
||||
* Also, it's pointless to enable them on unfrozen PE. So
|
||||
* we have the check here.
|
||||
* we have to check before enabling IO or DMA.
|
||||
*/
|
||||
if (function == EEH_OPT_THAW_MMIO ||
|
||||
function == EEH_OPT_THAW_DMA) {
|
||||
switch (function) {
|
||||
case EEH_OPT_THAW_MMIO:
|
||||
active_flag = EEH_STATE_MMIO_ACTIVE;
|
||||
break;
|
||||
case EEH_OPT_THAW_DMA:
|
||||
active_flag = EEH_STATE_DMA_ACTIVE;
|
||||
break;
|
||||
case EEH_OPT_DISABLE:
|
||||
case EEH_OPT_ENABLE:
|
||||
case EEH_OPT_FREEZE_PE:
|
||||
active_flag = 0;
|
||||
break;
|
||||
default:
|
||||
pr_warn("%s: Invalid function %d\n",
|
||||
__func__, function);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if IO or DMA has been enabled before
|
||||
* enabling them.
|
||||
*/
|
||||
if (active_flag) {
|
||||
rc = eeh_ops->get_state(pe, NULL);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
|
||||
/* Needn't to enable or already enabled */
|
||||
if ((rc == EEH_STATE_NOT_SUPPORT) ||
|
||||
((rc & flags) == flags))
|
||||
/* Needn't enable it at all */
|
||||
if (rc == EEH_STATE_NOT_SUPPORT)
|
||||
return 0;
|
||||
|
||||
/* It's already enabled */
|
||||
if (rc & active_flag)
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Issue the request */
|
||||
rc = eeh_ops->set_option(pe, function);
|
||||
if (rc)
|
||||
pr_warn("%s: Unexpected state change %d on "
|
||||
@@ -608,17 +636,17 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
|
||||
__func__, function, pe->phb->global_number,
|
||||
pe->addr, rc);
|
||||
|
||||
rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
|
||||
if (rc <= 0)
|
||||
return rc;
|
||||
/* Check if the request is finished successfully */
|
||||
if (active_flag) {
|
||||
rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
|
||||
if (rc <= 0)
|
||||
return rc;
|
||||
|
||||
if ((function == EEH_OPT_THAW_MMIO) &&
|
||||
(rc & EEH_STATE_MMIO_ENABLED))
|
||||
return 0;
|
||||
if (rc & active_flag)
|
||||
return 0;
|
||||
|
||||
if ((function == EEH_OPT_THAW_DMA) &&
|
||||
(rc & EEH_STATE_DMA_ENABLED))
|
||||
return 0;
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
@@ -634,7 +662,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
|
||||
int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
|
||||
{
|
||||
struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
|
||||
struct eeh_pe *pe = edev->pe;
|
||||
struct eeh_pe *pe = eeh_dev_to_pe(edev);
|
||||
|
||||
if (!pe) {
|
||||
pr_err("%s: No PE found on PCI device %s\n",
|
||||
@@ -645,14 +673,18 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
|
||||
switch (state) {
|
||||
case pcie_deassert_reset:
|
||||
eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
|
||||
eeh_pe_state_clear(pe, EEH_PE_RESET);
|
||||
break;
|
||||
case pcie_hot_reset:
|
||||
eeh_pe_state_mark(pe, EEH_PE_RESET);
|
||||
eeh_ops->reset(pe, EEH_RESET_HOT);
|
||||
break;
|
||||
case pcie_warm_reset:
|
||||
eeh_pe_state_mark(pe, EEH_PE_RESET);
|
||||
eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
|
||||
break;
|
||||
default:
|
||||
eeh_pe_state_clear(pe, EEH_PE_RESET);
|
||||
return -EINVAL;
|
||||
};
|
||||
|
||||
@@ -1141,6 +1173,85 @@ void eeh_remove_device(struct pci_dev *dev)
|
||||
edev->mode &= ~EEH_DEV_SYSFS;
|
||||
}
|
||||
|
||||
int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
|
||||
if (ret) {
|
||||
pr_warn("%s: Failure %d enabling IO on PHB#%x-PE#%x\n",
|
||||
__func__, ret, pe->phb->global_number, pe->addr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
|
||||
if (ret) {
|
||||
pr_warn("%s: Failure %d enabling DMA on PHB#%x-PE#%x\n",
|
||||
__func__, ret, pe->phb->global_number, pe->addr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Clear software isolated state */
|
||||
if (sw_state && (pe->state & EEH_PE_ISOLATED))
|
||||
eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static struct pci_device_id eeh_reset_ids[] = {
|
||||
{ PCI_DEVICE(0x19a2, 0x0710) }, /* Emulex, BE */
|
||||
{ PCI_DEVICE(0x10df, 0xe220) }, /* Emulex, Lancer */
|
||||
{ 0 }
|
||||
};
|
||||
|
||||
static int eeh_pe_change_owner(struct eeh_pe *pe)
|
||||
{
|
||||
struct eeh_dev *edev, *tmp;
|
||||
struct pci_dev *pdev;
|
||||
struct pci_device_id *id;
|
||||
int flags, ret;
|
||||
|
||||
/* Check PE state */
|
||||
flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
|
||||
ret = eeh_ops->get_state(pe, NULL);
|
||||
if (ret < 0 || ret == EEH_STATE_NOT_SUPPORT)
|
||||
return 0;
|
||||
|
||||
/* Unfrozen PE, nothing to do */
|
||||
if ((ret & flags) == flags)
|
||||
return 0;
|
||||
|
||||
/* Frozen PE, check if it needs PE level reset */
|
||||
eeh_pe_for_each_dev(pe, edev, tmp) {
|
||||
pdev = eeh_dev_to_pci_dev(edev);
|
||||
if (!pdev)
|
||||
continue;
|
||||
|
||||
for (id = &eeh_reset_ids[0]; id->vendor != 0; id++) {
|
||||
if (id->vendor != PCI_ANY_ID &&
|
||||
id->vendor != pdev->vendor)
|
||||
continue;
|
||||
if (id->device != PCI_ANY_ID &&
|
||||
id->device != pdev->device)
|
||||
continue;
|
||||
if (id->subvendor != PCI_ANY_ID &&
|
||||
id->subvendor != pdev->subsystem_vendor)
|
||||
continue;
|
||||
if (id->subdevice != PCI_ANY_ID &&
|
||||
id->subdevice != pdev->subsystem_device)
|
||||
continue;
|
||||
|
||||
goto reset;
|
||||
}
|
||||
}
|
||||
|
||||
return eeh_unfreeze_pe(pe, true);
|
||||
|
||||
reset:
|
||||
return eeh_pe_reset_and_recover(pe);
|
||||
}
|
||||
|
||||
/**
|
||||
* eeh_dev_open - Increase count of pass through devices for PE
|
||||
* @pdev: PCI device
|
||||
@@ -1153,6 +1264,7 @@ void eeh_remove_device(struct pci_dev *dev)
|
||||
int eeh_dev_open(struct pci_dev *pdev)
|
||||
{
|
||||
struct eeh_dev *edev;
|
||||
int ret = -ENODEV;
|
||||
|
||||
mutex_lock(&eeh_dev_mutex);
|
||||
|
||||
@@ -1165,6 +1277,16 @@ int eeh_dev_open(struct pci_dev *pdev)
|
||||
if (!edev || !edev->pe)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* The PE might have been put into frozen state, but we
|
||||
* didn't detect that yet. The passed through PCI devices
|
||||
* in frozen PE won't work properly. Clear the frozen state
|
||||
* in advance.
|
||||
*/
|
||||
ret = eeh_pe_change_owner(edev->pe);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* Increase PE's pass through count */
|
||||
atomic_inc(&edev->pe->pass_dev_cnt);
|
||||
mutex_unlock(&eeh_dev_mutex);
|
||||
@@ -1172,7 +1294,7 @@ int eeh_dev_open(struct pci_dev *pdev)
|
||||
return 0;
|
||||
out:
|
||||
mutex_unlock(&eeh_dev_mutex);
|
||||
return -ENODEV;
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(eeh_dev_open);
|
||||
|
||||
@@ -1202,6 +1324,7 @@ void eeh_dev_release(struct pci_dev *pdev)
|
||||
/* Decrease PE's pass through count */
|
||||
atomic_dec(&edev->pe->pass_dev_cnt);
|
||||
WARN_ON(atomic_read(&edev->pe->pass_dev_cnt) < 0);
|
||||
eeh_pe_change_owner(edev->pe);
|
||||
out:
|
||||
mutex_unlock(&eeh_dev_mutex);
|
||||
}
|
||||
@@ -1281,8 +1404,10 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option)
|
||||
*/
|
||||
switch (option) {
|
||||
case EEH_OPT_ENABLE:
|
||||
if (eeh_enabled())
|
||||
if (eeh_enabled()) {
|
||||
ret = eeh_pe_change_owner(pe);
|
||||
break;
|
||||
}
|
||||
ret = -EIO;
|
||||
break;
|
||||
case EEH_OPT_DISABLE:
|
||||
@@ -1294,7 +1419,7 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option)
|
||||
break;
|
||||
}
|
||||
|
||||
ret = eeh_ops->set_option(pe, option);
|
||||
ret = eeh_pci_enable(pe, option);
|
||||
break;
|
||||
default:
|
||||
pr_debug("%s: Option %d out of range (%d, %d)\n",
|
||||
@@ -1345,6 +1470,36 @@ int eeh_pe_get_state(struct eeh_pe *pe)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(eeh_pe_get_state);
|
||||
|
||||
static int eeh_pe_reenable_devices(struct eeh_pe *pe)
|
||||
{
|
||||
struct eeh_dev *edev, *tmp;
|
||||
struct pci_dev *pdev;
|
||||
int ret = 0;
|
||||
|
||||
/* Restore config space */
|
||||
eeh_pe_restore_bars(pe);
|
||||
|
||||
/*
|
||||
* Reenable PCI devices as the devices passed
|
||||
* through are always enabled before the reset.
|
||||
*/
|
||||
eeh_pe_for_each_dev(pe, edev, tmp) {
|
||||
pdev = eeh_dev_to_pci_dev(edev);
|
||||
if (!pdev)
|
||||
continue;
|
||||
|
||||
ret = pci_reenable_device(pdev);
|
||||
if (ret) {
|
||||
pr_warn("%s: Failure %d reenabling %s\n",
|
||||
__func__, ret, pci_name(pdev));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
/* The PE is still in frozen state */
|
||||
return eeh_unfreeze_pe(pe, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* eeh_pe_reset - Issue PE reset according to specified type
|
||||
* @pe: EEH PE
|
||||
@@ -1368,23 +1523,22 @@ int eeh_pe_reset(struct eeh_pe *pe, int option)
|
||||
switch (option) {
|
||||
case EEH_RESET_DEACTIVATE:
|
||||
ret = eeh_ops->reset(pe, option);
|
||||
eeh_pe_state_clear(pe, EEH_PE_RESET);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
/*
|
||||
* The PE is still in frozen state and we need to clear
|
||||
* that. It's good to clear frozen state after deassert
|
||||
* to avoid messy IO access during reset, which might
|
||||
* cause recursive frozen PE.
|
||||
*/
|
||||
ret = eeh_ops->set_option(pe, EEH_OPT_THAW_MMIO);
|
||||
if (!ret)
|
||||
ret = eeh_ops->set_option(pe, EEH_OPT_THAW_DMA);
|
||||
if (!ret)
|
||||
eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
|
||||
ret = eeh_pe_reenable_devices(pe);
|
||||
break;
|
||||
case EEH_RESET_HOT:
|
||||
case EEH_RESET_FUNDAMENTAL:
|
||||
/*
|
||||
* Proactively freeze the PE to drop all MMIO access
|
||||
* during reset, which should be banned as it's always
|
||||
* cause recursive EEH error.
|
||||
*/
|
||||
eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
|
||||
|
||||
eeh_pe_state_mark(pe, EEH_PE_RESET);
|
||||
ret = eeh_ops->reset(pe, option);
|
||||
break;
|
||||
default:
|
||||
@@ -1413,9 +1567,6 @@ int eeh_pe_configure(struct eeh_pe *pe)
|
||||
if (!pe)
|
||||
return -ENODEV;
|
||||
|
||||
/* Restore config space for the affected devices */
|
||||
eeh_pe_restore_bars(pe);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(eeh_pe_configure);
|
||||
|
@@ -180,6 +180,22 @@ static bool eeh_dev_removed(struct eeh_dev *edev)
|
||||
return false;
|
||||
}
|
||||
|
||||
static void *eeh_dev_save_state(void *data, void *userdata)
|
||||
{
|
||||
struct eeh_dev *edev = data;
|
||||
struct pci_dev *pdev;
|
||||
|
||||
if (!edev)
|
||||
return NULL;
|
||||
|
||||
pdev = eeh_dev_to_pci_dev(edev);
|
||||
if (!pdev)
|
||||
return NULL;
|
||||
|
||||
pci_save_state(pdev);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* eeh_report_error - Report pci error to each device driver
|
||||
* @data: eeh device
|
||||
@@ -303,6 +319,22 @@ static void *eeh_report_reset(void *data, void *userdata)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *eeh_dev_restore_state(void *data, void *userdata)
|
||||
{
|
||||
struct eeh_dev *edev = data;
|
||||
struct pci_dev *pdev;
|
||||
|
||||
if (!edev)
|
||||
return NULL;
|
||||
|
||||
pdev = eeh_dev_to_pci_dev(edev);
|
||||
if (!pdev)
|
||||
return NULL;
|
||||
|
||||
pci_restore_state(pdev);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* eeh_report_resume - Tell device to resume normal operations
|
||||
* @data: eeh device
|
||||
@@ -450,38 +482,82 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
|
||||
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
|
||||
{
|
||||
struct eeh_pe *pe = (struct eeh_pe *)data;
|
||||
int i, rc;
|
||||
bool *clear_sw_state = flag;
|
||||
int i, rc = 1;
|
||||
|
||||
for (i = 0; i < 3; i++) {
|
||||
rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
|
||||
if (rc)
|
||||
continue;
|
||||
rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
|
||||
if (!rc)
|
||||
break;
|
||||
}
|
||||
for (i = 0; rc && i < 3; i++)
|
||||
rc = eeh_unfreeze_pe(pe, clear_sw_state);
|
||||
|
||||
/* The PE has been isolated, clear it */
|
||||
/* Stop immediately on any errors */
|
||||
if (rc) {
|
||||
pr_warn("%s: Can't clear frozen PHB#%x-PE#%x (%d)\n",
|
||||
__func__, pe->phb->global_number, pe->addr, rc);
|
||||
pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
|
||||
__func__, rc, pe->phb->global_number, pe->addr);
|
||||
return (void *)pe;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
|
||||
static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
|
||||
bool clear_sw_state)
|
||||
{
|
||||
void *rc;
|
||||
|
||||
rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, NULL);
|
||||
rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
|
||||
if (!rc)
|
||||
eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
|
||||
|
||||
return rc ? -EIO : 0;
|
||||
}
|
||||
|
||||
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
|
||||
{
|
||||
int result, ret;
|
||||
|
||||
/* Bail if the PE is being recovered */
|
||||
if (pe->state & EEH_PE_RECOVERING)
|
||||
return 0;
|
||||
|
||||
/* Put the PE into recovery mode */
|
||||
eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
|
||||
|
||||
/* Save states */
|
||||
eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
|
||||
|
||||
/* Report error */
|
||||
eeh_pe_dev_traverse(pe, eeh_report_error, &result);
|
||||
|
||||
/* Issue reset */
|
||||
eeh_pe_state_mark(pe, EEH_PE_RESET);
|
||||
ret = eeh_reset_pe(pe);
|
||||
if (ret) {
|
||||
eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_RESET);
|
||||
return ret;
|
||||
}
|
||||
eeh_pe_state_clear(pe, EEH_PE_RESET);
|
||||
|
||||
/* Unfreeze the PE */
|
||||
ret = eeh_clear_pe_frozen_state(pe, true);
|
||||
if (ret) {
|
||||
eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Notify completion of reset */
|
||||
eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
|
||||
|
||||
/* Restore device state */
|
||||
eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);
|
||||
|
||||
/* Resume */
|
||||
eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
|
||||
|
||||
/* Clear recovery mode */
|
||||
eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* eeh_reset_device - Perform actual reset of a pci slot
|
||||
* @pe: EEH PE
|
||||
@@ -540,7 +616,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
|
||||
eeh_pe_state_clear(pe, EEH_PE_RESET);
|
||||
|
||||
/* Clear frozen state */
|
||||
rc = eeh_clear_pe_frozen_state(pe);
|
||||
rc = eeh_clear_pe_frozen_state(pe, false);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
|
@@ -428,7 +428,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
	}

	/* Remove the EEH device */
	pe = edev->pe;
	pe = eeh_dev_to_pe(edev);
	edev->pe = NULL;
	list_del(&edev->list);

@@ -584,6 +584,8 @@ static void *__eeh_pe_state_clear(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	int state = *((int *)flag);
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;

	/* Keep the state of permanently removed PE intact */
	if ((pe->freeze_count > EEH_MAX_ALLOWED_FREEZES) &&
@@ -592,9 +594,22 @@ static void *__eeh_pe_state_clear(void *data, void *flag)

	pe->state &= ~state;

	/* Clear check count since last isolation */
	if (state & EEH_PE_ISOLATED)
		pe->check_count = 0;
	/*
	 * Special treatment on clearing isolated state. Clear
	 * check count since last isolation and put all affected
	 * devices to normal state.
	 */
	if (!(state & EEH_PE_ISOLATED))
		return NULL;

	pe->check_count = 0;
	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (!pdev)
			continue;

		pdev->error_state = pci_channel_io_normal;
	}

	return NULL;
}
@@ -54,6 +54,43 @@ EEH_SHOW_ATTR(eeh_mode, mode, "0x%x");
EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x");
EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");

static ssize_t eeh_pe_state_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
	int state;

	if (!edev || !edev->pe)
		return -ENODEV;

	state = eeh_ops->get_state(edev->pe, NULL);
	return sprintf(buf, "%0x08x %0x08x\n",
		       state, edev->pe->state);
}

static ssize_t eeh_pe_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);

	if (!edev || !edev->pe)
		return -ENODEV;

	/* Nothing to do if it's not frozen */
	if (!(edev->pe->state & EEH_PE_ISOLATED))
		return count;

	if (eeh_unfreeze_pe(edev->pe, true))
		return -EIO;

	return count;
}

static DEVICE_ATTR_RW(eeh_pe_state);

void eeh_sysfs_add_device(struct pci_dev *pdev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
@@ -68,9 +105,10 @@ void eeh_sysfs_add_device(struct pci_dev *pdev)
	rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
	rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr);
	rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
	rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_state);

	if (rc)
		printk(KERN_WARNING "EEH: Unable to create sysfs entries\n");
		pr_warn("EEH: Unable to create sysfs entries\n");
	else if (edev)
		edev->mode |= EEH_DEV_SYSFS;
}
@@ -92,6 +130,7 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev)
	device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
	device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
	device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
	device_remove_file(&pdev->dev, &dev_attr_eeh_pe_state);

	if (edev)
		edev->mode &= ~EEH_DEV_SYSFS;
@@ -104,12 +104,15 @@ turn_on_mmu:
|
||||
* task's thread_struct.
|
||||
*/
|
||||
#define EXCEPTION_PROLOG \
|
||||
mtspr SPRN_SPRG_SCRATCH0,r10; \
|
||||
mtspr SPRN_SPRG_SCRATCH1,r11; \
|
||||
mfcr r10; \
|
||||
EXCEPTION_PROLOG_0; \
|
||||
EXCEPTION_PROLOG_1; \
|
||||
EXCEPTION_PROLOG_2
|
||||
|
||||
#define EXCEPTION_PROLOG_0 \
|
||||
mtspr SPRN_SPRG_SCRATCH0,r10; \
|
||||
mtspr SPRN_SPRG_SCRATCH1,r11; \
|
||||
mfcr r10
|
||||
|
||||
#define EXCEPTION_PROLOG_1 \
|
||||
mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
|
||||
andi. r11,r11,MSR_PR; \
|
||||
@@ -144,6 +147,14 @@ turn_on_mmu:
|
||||
SAVE_4GPRS(3, r11); \
|
||||
SAVE_2GPRS(7, r11)
|
||||
|
||||
/*
|
||||
* Exception exit code.
|
||||
*/
|
||||
#define EXCEPTION_EPILOG_0 \
|
||||
mtcr r10; \
|
||||
mfspr r10,SPRN_SPRG_SCRATCH0; \
|
||||
mfspr r11,SPRN_SPRG_SCRATCH1
|
||||
|
||||
/*
|
||||
* Note: code which follows this uses cr0.eq (set if from kernel),
|
||||
* r11, r12 (SRR0), and r9 (SRR1).
|
||||
@@ -293,16 +304,8 @@ InstructionTLBMiss:
|
||||
#ifdef CONFIG_8xx_CPU6
|
||||
stw r3, 8(r0)
|
||||
#endif
|
||||
DO_8xx_CPU6(0x3f80, r3)
|
||||
mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
|
||||
mfcr r10
|
||||
#ifdef CONFIG_8xx_CPU6
|
||||
stw r10, 0(r0)
|
||||
stw r11, 4(r0)
|
||||
#else
|
||||
mtspr SPRN_DAR, r10
|
||||
mtspr SPRN_SPRG2, r11
|
||||
#endif
|
||||
EXCEPTION_PROLOG_0
|
||||
mtspr SPRN_SPRG_SCRATCH2, r10
|
||||
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
|
||||
#ifdef CONFIG_8xx_CPU15
|
||||
addi r11, r10, 0x1000
|
||||
@@ -359,18 +362,11 @@ InstructionTLBMiss:
|
||||
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
|
||||
|
||||
/* Restore registers */
|
||||
#ifndef CONFIG_8xx_CPU6
|
||||
mfspr r10, SPRN_DAR
|
||||
mtcr r10
|
||||
mtspr SPRN_DAR, r11 /* Tag DAR */
|
||||
mfspr r11, SPRN_SPRG2
|
||||
#else
|
||||
lwz r11, 0(r0)
|
||||
mtcr r11
|
||||
lwz r11, 4(r0)
|
||||
#ifdef CONFIG_8xx_CPU6
|
||||
lwz r3, 8(r0)
|
||||
#endif
|
||||
mfspr r10, SPRN_M_TW
|
||||
mfspr r10, SPRN_SPRG_SCRATCH2
|
||||
EXCEPTION_EPILOG_0
|
||||
rfi
|
||||
2:
|
||||
mfspr r11, SPRN_SRR1
|
||||
@@ -381,19 +377,11 @@ InstructionTLBMiss:
|
||||
mtspr SPRN_SRR1, r11
|
||||
|
||||
/* Restore registers */
|
||||
#ifndef CONFIG_8xx_CPU6
|
||||
mfspr r10, SPRN_DAR
|
||||
mtcr r10
|
||||
li r11, 0x00f0
|
||||
mtspr SPRN_DAR, r11 /* Tag DAR */
|
||||
mfspr r11, SPRN_SPRG2
|
||||
#else
|
||||
lwz r11, 0(r0)
|
||||
mtcr r11
|
||||
lwz r11, 4(r0)
|
||||
#ifdef CONFIG_8xx_CPU6
|
||||
lwz r3, 8(r0)
|
||||
#endif
|
||||
mfspr r10, SPRN_M_TW
|
||||
mfspr r10, SPRN_SPRG_SCRATCH2
|
||||
EXCEPTION_EPILOG_0
|
||||
b InstructionAccess
|
||||
|
||||
. = 0x1200
|
||||
@@ -401,16 +389,8 @@ DataStoreTLBMiss:
|
||||
#ifdef CONFIG_8xx_CPU6
|
||||
stw r3, 8(r0)
|
||||
#endif
|
||||
DO_8xx_CPU6(0x3f80, r3)
|
||||
mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
|
||||
mfcr r10
|
||||
#ifdef CONFIG_8xx_CPU6
|
||||
stw r10, 0(r0)
|
||||
stw r11, 4(r0)
|
||||
#else
|
||||
mtspr SPRN_DAR, r10
|
||||
mtspr SPRN_SPRG2, r11
|
||||
#endif
|
||||
EXCEPTION_PROLOG_0
|
||||
mtspr SPRN_SPRG_SCRATCH2, r10
|
||||
mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
|
||||
|
||||
/* If we are faulting a kernel address, we have to use the
|
||||
@@ -483,19 +463,12 @@ DataStoreTLBMiss:
|
||||
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
|
||||
|
||||
/* Restore registers */
|
||||
#ifndef CONFIG_8xx_CPU6
|
||||
mfspr r10, SPRN_DAR
|
||||
mtcr r10
|
||||
mtspr SPRN_DAR, r11 /* Tag DAR */
|
||||
mfspr r11, SPRN_SPRG2
|
||||
#else
|
||||
mtspr SPRN_DAR, r11 /* Tag DAR */
|
||||
lwz r11, 0(r0)
|
||||
mtcr r11
|
||||
lwz r11, 4(r0)
|
||||
#ifdef CONFIG_8xx_CPU6
|
||||
lwz r3, 8(r0)
|
||||
#endif
|
||||
mfspr r10, SPRN_M_TW
|
||||
mtspr SPRN_DAR, r11 /* Tag DAR */
|
||||
mfspr r10, SPRN_SPRG_SCRATCH2
|
||||
EXCEPTION_EPILOG_0
|
||||
rfi
|
||||
|
||||
/* This is an instruction TLB error on the MPC8xx. This could be due
|
||||
@@ -507,35 +480,18 @@ InstructionTLBError:
|
||||
b InstructionAccess
|
||||
|
||||
/* This is the data TLB error on the MPC8xx. This could be due to
|
||||
* many reasons, including a dirty update to a pte. We can catch that
|
||||
* one here, but anything else is an error. First, we track down the
|
||||
* Linux pte. If it is valid, write access is allowed, but the
|
||||
* page dirty bit is not set, we will set it and reload the TLB. For
|
||||
* any other case, we bail out to a higher level function that can
|
||||
* handle it.
|
||||
* many reasons, including a dirty update to a pte. We bail out to
|
||||
* a higher level function that can handle it.
|
||||
*/
|
||||
. = 0x1400
|
||||
DataTLBError:
|
||||
#ifdef CONFIG_8xx_CPU6
|
||||
stw r3, 8(r0)
|
||||
#endif
|
||||
DO_8xx_CPU6(0x3f80, r3)
|
||||
mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
|
||||
mfcr r10
|
||||
stw r10, 0(r0)
|
||||
stw r11, 4(r0)
|
||||
EXCEPTION_PROLOG_0
|
||||
|
||||
mfspr r10, SPRN_DAR
|
||||
cmpwi cr0, r10, 0x00f0
|
||||
mfspr r11, SPRN_DAR
|
||||
cmpwi cr0, r11, 0x00f0
|
||||
beq- FixupDAR /* must be a buggy dcbX, icbi insn. */
|
||||
DARFixed:/* Return from dcbx instruction bug workaround, r10 holds value of DAR */
|
||||
mfspr r10, SPRN_M_TW /* Restore registers */
|
||||
lwz r11, 0(r0)
|
||||
mtcr r11
|
||||
lwz r11, 4(r0)
|
||||
#ifdef CONFIG_8xx_CPU6
|
||||
lwz r3, 8(r0)
|
||||
#endif
|
||||
DARFixed:/* Return from dcbx instruction bug workaround */
|
||||
EXCEPTION_EPILOG_0
|
||||
b DataAccess
|
||||
|
||||
EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
|
||||
@@ -559,11 +515,15 @@ DARFixed:/* Return from dcbx instruction bug workaround, r10 holds value of DAR
|
||||
|
||||
/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
|
||||
* by decoding the registers used by the dcbx instruction and adding them.
|
||||
* DAR is set to the calculated address and r10 also holds the EA on exit.
|
||||
* DAR is set to the calculated address.
|
||||
*/
|
||||
/* define if you don't want to use self modifying code */
|
||||
#define NO_SELF_MODIFYING_CODE
|
||||
FixupDAR:/* Entry point for dcbx workaround. */
|
||||
#ifdef CONFIG_8xx_CPU6
|
||||
stw r3, 8(r0)
|
||||
#endif
|
||||
mtspr SPRN_SPRG_SCRATCH2, r10
|
||||
/* fetch instruction from memory. */
|
||||
mfspr r10, SPRN_SRR0
|
||||
andis. r11, r10, 0x8000 /* Address >= 0x80000000 */
|
||||
@@ -579,16 +539,17 @@ FixupDAR:/* Entry point for dcbx workaround. */
|
||||
mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
|
||||
mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
|
||||
lwz r11, 0(r11) /* Get the pte */
|
||||
#ifdef CONFIG_8xx_CPU6
|
||||
lwz r3, 8(r0) /* restore r3 from memory */
|
||||
#endif
|
||||
/* concat physical page address(r11) and page offset(r10) */
|
||||
rlwimi r11, r10, 0, 20, 31
|
||||
lwz r11,0(r11)
|
||||
/* Check if it really is a dcbx instruction. */
|
||||
/* dcbt and dcbtst does not generate DTLB Misses/Errors,
|
||||
* no need to include them here */
|
||||
srwi r10, r11, 26 /* check if major OP code is 31 */
|
||||
cmpwi cr0, r10, 31
|
||||
bne- 141f
|
||||
rlwinm r10, r11, 0, 21, 30
|
||||
xoris r10, r11, 0x7c00 /* check if major OP code is 31 */
|
||||
rlwinm r10, r10, 0, 21, 5
|
||||
cmpwi cr0, r10, 2028 /* Is dcbz? */
|
||||
beq+ 142f
|
||||
cmpwi cr0, r10, 940 /* Is dcbi? */
|
||||
@@ -599,16 +560,13 @@ FixupDAR:/* Entry point for dcbx workaround. */
|
||||
beq+ 142f
|
||||
cmpwi cr0, r10, 1964 /* Is icbi? */
|
||||
beq+ 142f
|
||||
141: mfspr r10, SPRN_DAR /* r10 must hold DAR at exit */
|
||||
141: mfspr r10,SPRN_SPRG_SCRATCH2
|
||||
b DARFixed /* Nope, go back to normal TLB processing */
|
||||
|
||||
144: mfspr r10, SPRN_DSISR
|
||||
rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */
|
||||
mtspr SPRN_DSISR, r10
|
||||
142: /* continue, it was a dcbx, dcbi instruction. */
|
||||
#ifdef CONFIG_8xx_CPU6
|
||||
lwz r3, 8(r0) /* restore r3 from memory */
|
||||
#endif
|
||||
#ifndef NO_SELF_MODIFYING_CODE
|
||||
andis. r10,r11,0x1f /* test if reg RA is r0 */
|
||||
li r10,modified_instr@l
|
||||
@@ -619,14 +577,15 @@ FixupDAR:/* Entry point for dcbx workaround. */
|
||||
stw r11,0(r10) /* store add/and instruction */
|
||||
dcbf 0,r10 /* flush new instr. to memory. */
|
||||
icbi 0,r10 /* invalidate instr. cache line */
|
||||
lwz r11, 4(r0) /* restore r11 from memory */
|
||||
mfspr r10, SPRN_M_TW /* restore r10 from M_TW */
|
||||
mfspr r11, SPRN_SPRG_SCRATCH1 /* restore r11 */
|
||||
mfspr r10, SPRN_SPRG_SCRATCH0 /* restore r10 */
|
||||
isync /* Wait until new instr is loaded from memory */
|
||||
modified_instr:
|
||||
.space 4 /* this is where the add instr. is stored */
|
||||
bne+ 143f
|
||||
subf r10,r0,r10 /* r10=r10-r0, only if reg RA is r0 */
|
||||
143: mtdar r10 /* store faulting EA in DAR */
|
||||
mfspr r10,SPRN_SPRG_SCRATCH2
|
||||
b DARFixed /* Go back to normal TLB handling */
|
||||
#else
|
||||
mfctr r10
|
||||
@@ -680,13 +639,16 @@ modified_instr:
|
||||
mfdar r11
|
||||
mtctr r11 /* restore ctr reg from DAR */
|
||||
mtdar r10 /* save fault EA to DAR */
|
||||
mfspr r10,SPRN_SPRG_SCRATCH2
|
||||
b DARFixed /* Go back to normal TLB handling */
|
||||
|
||||
/* special handling for r10,r11 since these are modified already */
|
||||
153: lwz r11, 4(r0) /* load r11 from memory */
|
||||
b 155f
|
||||
154: mfspr r11, SPRN_M_TW /* load r10 from M_TW */
|
||||
155: add r10, r10, r11 /* add it */
|
||||
153: mfspr r11, SPRN_SPRG_SCRATCH1 /* load r11 from SPRN_SPRG_SCRATCH1 */
|
||||
add r10, r10, r11 /* add it */
|
||||
mfctr r11 /* restore r11 */
|
||||
b 151b
|
||||
154: mfspr r11, SPRN_SPRG_SCRATCH0 /* load r10 from SPRN_SPRG_SCRATCH0 */
|
||||
add r10, r10, r11 /* add it */
|
||||
mfctr r11 /* restore r11 */
|
||||
b 151b
|
||||
#endif
|
||||
|
@@ -293,7 +293,7 @@ out:
/*
 * Handle single-step exceptions following a DABR hit.
 */
int __kprobes single_step_dabr_instruction(struct die_args *args)
static int __kprobes single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
@@ -55,7 +55,7 @@ static struct device ibmebus_bus_device = { /* fake "parent" device */
struct bus_type ibmebus_bus_type;

/* These devices will automatically be added to the bus during init */
static struct of_device_id __initdata ibmebus_matches[] = {
static const struct of_device_id ibmebus_matches[] __initconst = {
	{ .compatible = "IBM,lhca" },
	{ .compatible = "IBM,lhea" },
	{},
@@ -73,7 +73,7 @@ _GLOBAL(power7_powersave_common)

	/* Check if something happened while soft-disabled */
	lbz r0,PACAIRQHAPPENED(r13)
	cmpwi cr0,r0,0
	andi. r0,r0,~PACA_IRQ_HARD_DIS@l
	beq 1f
	cmpwi cr0,r4,0
	beq 1f
@@ -444,13 +444,13 @@ void migrate_irqs(void)

		cpumask_and(mask, data->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			pr_warn("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, mask, true);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
			pr_err("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);
@@ -470,7 +470,7 @@ static inline void check_stack_overflow(void)

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
		pr_err("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
@@ -35,7 +35,7 @@ static struct legacy_serial_info {
	phys_addr_t taddr;
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];

static struct of_device_id legacy_serial_parents[] __initdata = {
static const struct of_device_id legacy_serial_parents[] __initconst = {
	{.type = "soc",},
	{.type = "tsi-bridge",},
	{.type = "opb", },
@@ -15,6 +15,9 @@
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleloader.h>
|
||||
#include <linux/elf.h>
|
||||
@@ -28,12 +31,6 @@
|
||||
#include <linux/sort.h>
|
||||
#include <asm/setup.h>
|
||||
|
||||
#if 0
|
||||
#define DEBUGP printk
|
||||
#else
|
||||
#define DEBUGP(fmt , ...)
|
||||
#endif
|
||||
|
||||
/* Count how many different relocations (different symbol, different
|
||||
addend) */
|
||||
static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
|
||||
@@ -121,8 +118,8 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
|
||||
continue;
|
||||
|
||||
if (sechdrs[i].sh_type == SHT_RELA) {
|
||||
DEBUGP("Found relocations in section %u\n", i);
|
||||
DEBUGP("Ptr: %p. Number: %u\n",
|
||||
pr_debug("Found relocations in section %u\n", i);
|
||||
pr_debug("Ptr: %p. Number: %u\n",
|
||||
(void *)hdr + sechdrs[i].sh_offset,
|
||||
sechdrs[i].sh_size / sizeof(Elf32_Rela));
|
||||
|
||||
@@ -161,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
|
||||
me->arch.core_plt_section = i;
|
||||
}
|
||||
if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
|
||||
printk("Module doesn't contain .plt or .init.plt sections.\n");
|
||||
pr_err("Module doesn't contain .plt or .init.plt sections.\n");
|
||||
return -ENOEXEC;
|
||||
}
|
||||
|
||||
@@ -189,7 +186,7 @@ static uint32_t do_plt_call(void *location,
|
||||
{
|
||||
struct ppc_plt_entry *entry;
|
||||
|
||||
DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
|
||||
pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
|
||||
/* Init, or core PLT? */
|
||||
if (location >= mod->module_core
|
||||
&& location < mod->module_core + mod->core_size)
|
||||
@@ -208,7 +205,7 @@ static uint32_t do_plt_call(void *location,
|
||||
entry->jump[2] = 0x7d8903a6; /* mtctr r12 */
|
||||
entry->jump[3] = 0x4e800420; /* bctr */
|
||||
|
||||
DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
|
||||
pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
|
||||
return (uint32_t)entry;
|
||||
}
|
||||
|
||||
@@ -224,7 +221,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
|
||||
uint32_t *location;
|
||||
uint32_t value;
|
||||
|
||||
DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
|
||||
pr_debug("Applying ADD relocate section %u to %u\n", relsec,
|
||||
sechdrs[relsec].sh_info);
|
||||
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
|
||||
/* This is where to make the change */
|
||||
@@ -268,17 +265,17 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
|
||||
sechdrs, module);
|
||||
|
||||
/* Only replace bits 2 through 26 */
|
||||
DEBUGP("REL24 value = %08X. location = %08X\n",
|
||||
pr_debug("REL24 value = %08X. location = %08X\n",
|
||||
value, (uint32_t)location);
|
||||
DEBUGP("Location before: %08X.\n",
|
||||
pr_debug("Location before: %08X.\n",
|
||||
*(uint32_t *)location);
|
||||
*(uint32_t *)location
|
||||
= (*(uint32_t *)location & ~0x03fffffc)
|
||||
| ((value - (uint32_t)location)
|
||||
& 0x03fffffc);
|
||||
DEBUGP("Location after: %08X.\n",
|
||||
pr_debug("Location after: %08X.\n",
|
||||
*(uint32_t *)location);
|
||||
DEBUGP("ie. jump to %08X+%08X = %08X\n",
|
||||
pr_debug("ie. jump to %08X+%08X = %08X\n",
|
||||
*(uint32_t *)location & 0x03fffffc,
|
||||
(uint32_t)location,
|
||||
(*(uint32_t *)location & 0x03fffffc)
|
||||
@@ -291,7 +288,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
|
||||
break;
|
||||
|
||||
default:
|
||||
printk("%s: unknown ADD relocation: %u\n",
|
||||
pr_err("%s: unknown ADD relocation: %u\n",
|
||||
module->name,
|
||||
ELF32_R_TYPE(rela[i].r_info));
|
||||
return -ENOEXEC;
|
||||
|
@@ -15,6 +15,9 @@
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/moduleloader.h>
|
||||
@@ -36,11 +39,6 @@
|
||||
Using a magic allocator which places modules within 32MB solves
|
||||
this, and makes other things simpler. Anton?
|
||||
--RR. */
|
||||
#if 0
|
||||
#define DEBUGP printk
|
||||
#else
|
||||
#define DEBUGP(fmt , ...)
|
||||
#endif
|
||||
|
||||
#if defined(_CALL_ELF) && _CALL_ELF == 2
|
||||
#define R2_STACK_OFFSET 24
|
||||
@@ -279,8 +277,8 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
|
||||
/* Every relocated section... */
|
||||
for (i = 1; i < hdr->e_shnum; i++) {
|
||||
if (sechdrs[i].sh_type == SHT_RELA) {
|
||||
DEBUGP("Found relocations in section %u\n", i);
|
||||
DEBUGP("Ptr: %p. Number: %lu\n",
|
||||
pr_debug("Found relocations in section %u\n", i);
|
||||
pr_debug("Ptr: %p. Number: %Lu\n",
|
||||
(void *)sechdrs[i].sh_addr,
|
||||
sechdrs[i].sh_size / sizeof(Elf64_Rela));
|
||||
|
||||
@@ -304,7 +302,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
|
||||
relocs++;
|
||||
#endif
|
||||
|
||||
DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
|
||||
pr_debug("Looks like a total of %lu stubs, max\n", relocs);
|
||||
return relocs * sizeof(struct ppc64_stub_entry);
|
||||
}
|
||||
|
||||
@@ -390,7 +388,7 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
|
||||
}
|
||||
|
||||
if (!me->arch.stubs_section) {
|
||||
printk("%s: doesn't contain .stubs.\n", me->name);
|
||||
pr_err("%s: doesn't contain .stubs.\n", me->name);
|
||||
return -ENOEXEC;
|
||||
}
|
||||
|
||||
@@ -434,11 +432,11 @@ static inline int create_stub(Elf64_Shdr *sechdrs,
|
||||
/* Stub uses address relative to r2. */
|
||||
reladdr = (unsigned long)entry - my_r2(sechdrs, me);
|
||||
if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
|
||||
printk("%s: Address %p of stub out of range of %p.\n",
|
||||
pr_err("%s: Address %p of stub out of range of %p.\n",
|
||||
me->name, (void *)reladdr, (void *)my_r2);
|
||||
return 0;
|
||||
}
|
||||
DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr);
|
||||
pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
|
||||
|
||||
entry->jump[0] |= PPC_HA(reladdr);
|
||||
entry->jump[1] |= PPC_LO(reladdr);
|
||||
@@ -477,7 +475,7 @@ static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
|
||||
static int restore_r2(u32 *instruction, struct module *me)
|
||||
{
|
||||
if (*instruction != PPC_INST_NOP) {
|
||||
printk("%s: Expect noop after relocate, got %08x\n",
|
||||
pr_err("%s: Expect noop after relocate, got %08x\n",
|
||||
me->name, *instruction);
|
||||
return 0;
|
||||
}
|
||||
@@ -498,7 +496,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
|
||||
unsigned long *location;
|
||||
unsigned long value;
|
||||
|
||||
DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
|
||||
pr_debug("Applying ADD relocate section %u to %u\n", relsec,
|
||||
sechdrs[relsec].sh_info);
|
||||
|
||||
/* First time we're called, we can fix up .TOC. */
|
||||
@@ -519,7 +517,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
|
||||
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
|
||||
+ ELF64_R_SYM(rela[i].r_info);
|
||||
|
||||
DEBUGP("RELOC at %p: %li-type as %s (%lu) + %li\n",
|
||||
pr_debug("RELOC at %p: %li-type as %s (0x%lx) + %li\n",
|
||||
location, (long)ELF64_R_TYPE(rela[i].r_info),
|
||||
strtab + sym->st_name, (unsigned long)sym->st_value,
|
||||
(long)rela[i].r_addend);
|
||||
@@ -546,7 +544,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
|
||||
/* Subtract TOC pointer */
|
||||
value -= my_r2(sechdrs, me);
|
||||
if (value + 0x8000 > 0xffff) {
|
||||
printk("%s: bad TOC16 relocation (%lu)\n",
|
||||
pr_err("%s: bad TOC16 relocation (0x%lx)\n",
|
||||
me->name, value);
|
||||
return -ENOEXEC;
|
||||
}
|
||||
@@ -567,7 +565,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
|
||||
/* Subtract TOC pointer */
|
||||
value -= my_r2(sechdrs, me);
|
||||
if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
|
||||
printk("%s: bad TOC16_DS relocation (%lu)\n",
|
||||
pr_err("%s: bad TOC16_DS relocation (0x%lx)\n",
|
||||
me->name, value);
|
||||
return -ENOEXEC;
|
||||
}
|
||||
@@ -580,7 +578,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
|
||||
/* Subtract TOC pointer */
|
||||
value -= my_r2(sechdrs, me);
|
||||
if ((value & 3) != 0) {
|
||||
printk("%s: bad TOC16_LO_DS relocation (%lu)\n",
|
||||
pr_err("%s: bad TOC16_LO_DS relocation (0x%lx)\n",
|
||||
me->name, value);
|
||||
return -ENOEXEC;
|
||||
}
|
||||
@@ -613,7 +611,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
|
||||
/* Convert value to relative */
|
||||
value -= (unsigned long)location;
|
||||
if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0){
|
||||
printk("%s: REL24 %li out of range!\n",
|
||||
pr_err("%s: REL24 %li out of range!\n",
|
||||
me->name, (long int)value);
|
||||
return -ENOEXEC;
|
||||
}
|
||||
@@ -655,7 +653,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
|
||||
break;
|
||||
|
||||
default:
|
||||
printk("%s: Unknown ADD relocation: %lu\n",
|
||||
pr_err("%s: Unknown ADD relocation: %lu\n",
|
||||
me->name,
|
||||
(unsigned long)ELF64_R_TYPE(rela[i].r_info));
|
||||
return -ENOEXEC;
|
||||
|
@@ -567,7 +567,7 @@ static int __init nvram_init(void)
	return rc;
}

void __exit nvram_cleanup(void)
static void __exit nvram_cleanup(void)
{
	misc_deregister( &nvram_dev );
}
@@ -97,7 +97,7 @@ static int of_pci_phb_probe(struct platform_device *dev)
	return 0;
}

static struct of_device_id of_pci_phb_ids[] = {
static const struct of_device_id of_pci_phb_ids[] = {
	{ .type = "pci", },
	{ .type = "pcix", },
	{ .type = "pcie", },
@@ -1140,7 +1140,7 @@ static int reparent_resources(struct resource *parent,
 * as well.
 */

void pcibios_allocate_bus_resources(struct pci_bus *bus)
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
@@ -1561,7 +1561,6 @@ EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)

extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int cap);
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
@@ -38,7 +38,7 @@ static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
 * @addr0: value of 1st cell of a device tree PCI address.
 * @bridge: Set this flag if the address is from a bridge 'ranges' property
 */
unsigned int pci_parse_of_flags(u32 addr0, int bridge)
static unsigned int pci_parse_of_flags(u32 addr0, int bridge)
{
	unsigned int flags = 0;

@@ -1,207 +1,41 @@
|
||||
#include <linux/export.h>
|
||||
#include <linux/threads.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/elfcore.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/screen_info.h>
|
||||
#include <linux/vt_kern.h>
|
||||
#include <linux/nvram.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/ftrace.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/io.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <asm/checksum.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <linux/adb.h>
|
||||
#include <linux/cuda.h>
|
||||
#include <linux/pmu.h>
|
||||
#include <asm/prom.h>
|
||||
#include <asm/pci-bridge.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/pmac_feature.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/hw_irq.h>
|
||||
#include <asm/nvram.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/backlight.h>
|
||||
#include <asm/time.h>
|
||||
#include <asm/cputable.h>
|
||||
#include <asm/btext.h>
|
||||
#include <asm/div64.h>
|
||||
#include <asm/signal.h>
|
||||
#include <asm/dcr.h>
|
||||
#include <asm/ftrace.h>
|
||||
#include <asm/switch_to.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/epapr_hcalls.h>
|
||||
|
||||
#ifdef CONFIG_PPC32
|
||||
extern void transfer_to_handler(void);
|
||||
extern void do_IRQ(struct pt_regs *regs);
|
||||
extern void machine_check_exception(struct pt_regs *regs);
|
||||
extern void alignment_exception(struct pt_regs *regs);
|
||||
extern void program_check_exception(struct pt_regs *regs);
|
||||
extern void single_step_exception(struct pt_regs *regs);
|
||||
extern int sys_sigreturn(struct pt_regs *regs);
|
||||
EXPORT_SYMBOL(flush_dcache_range);
|
||||
EXPORT_SYMBOL(flush_icache_range);
|
||||
|
||||
EXPORT_SYMBOL(clear_pages);
|
||||
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
|
||||
EXPORT_SYMBOL(DMA_MODE_READ);
|
||||
EXPORT_SYMBOL(DMA_MODE_WRITE);
|
||||
EXPORT_SYMBOL(empty_zero_page);
|
||||
|
||||
EXPORT_SYMBOL(transfer_to_handler);
|
||||
EXPORT_SYMBOL(do_IRQ);
|
||||
EXPORT_SYMBOL(machine_check_exception);
|
||||
EXPORT_SYMBOL(alignment_exception);
|
||||
EXPORT_SYMBOL(program_check_exception);
|
||||
EXPORT_SYMBOL(single_step_exception);
|
||||
EXPORT_SYMBOL(sys_sigreturn);
|
||||
#endif
|
||||
long long __bswapdi2(long long);
|
||||
EXPORT_SYMBOL(__bswapdi2);
|
||||
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
EXPORT_SYMBOL(_mcount);
|
||||
#endif
|
||||
|
||||
EXPORT_SYMBOL(strcpy);
|
||||
EXPORT_SYMBOL(strncpy);
|
||||
EXPORT_SYMBOL(strcat);
|
||||
EXPORT_SYMBOL(strlen);
|
||||
EXPORT_SYMBOL(strcmp);
|
||||
EXPORT_SYMBOL(strncmp);
|
||||
|
||||
#ifndef CONFIG_GENERIC_CSUM
|
||||
EXPORT_SYMBOL(csum_partial);
|
||||
EXPORT_SYMBOL(csum_partial_copy_generic);
|
||||
EXPORT_SYMBOL(ip_fast_csum);
|
||||
EXPORT_SYMBOL(csum_tcpudp_magic);
|
||||
#endif
|
||||
|
||||
EXPORT_SYMBOL(__copy_tofrom_user);
|
||||
EXPORT_SYMBOL(__clear_user);
|
||||
EXPORT_SYMBOL(copy_page);
|
||||
|
||||
#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
|
||||
EXPORT_SYMBOL(isa_io_base);
|
||||
EXPORT_SYMBOL(isa_mem_base);
|
||||
EXPORT_SYMBOL(pci_dram_offset);
|
||||
#endif /* CONFIG_PCI */
|
||||
|
||||
EXPORT_SYMBOL(start_thread);
|
||||
|
||||
#ifdef CONFIG_PPC_FPU
|
||||
EXPORT_SYMBOL(giveup_fpu);
|
||||
EXPORT_SYMBOL(load_fp_state);
|
||||
EXPORT_SYMBOL(store_fp_state);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
EXPORT_SYMBOL(giveup_altivec);
|
||||
EXPORT_SYMBOL(load_vr_state);
|
||||
EXPORT_SYMBOL(store_vr_state);
|
||||
#endif /* CONFIG_ALTIVEC */
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_VSX
|
||||
EXPORT_SYMBOL(giveup_vsx);
|
||||
EXPORT_SYMBOL_GPL(__giveup_vsx);
|
||||
#endif /* CONFIG_VSX */
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SPE
|
||||
EXPORT_SYMBOL(giveup_spe);
|
||||
#endif /* CONFIG_SPE */
|
||||
|
||||
#ifndef CONFIG_PPC64
|
||||
EXPORT_SYMBOL(flush_instruction_cache);
|
||||
#endif
|
||||
EXPORT_SYMBOL(flush_dcache_range);
EXPORT_SYMBOL(flush_icache_range);

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC32
EXPORT_SYMBOL(smp_hw_index);
#endif
#endif

#ifdef CONFIG_ADB
EXPORT_SYMBOL(adb_request);
EXPORT_SYMBOL(adb_register);
EXPORT_SYMBOL(adb_unregister);
EXPORT_SYMBOL(adb_poll);
EXPORT_SYMBOL(adb_try_handler_change);
#endif /* CONFIG_ADB */
#ifdef CONFIG_ADB_CUDA
EXPORT_SYMBOL(cuda_request);
EXPORT_SYMBOL(cuda_poll);
#endif /* CONFIG_ADB_CUDA */
EXPORT_SYMBOL(to_tm);

#ifdef CONFIG_PPC32
long long __ashrdi3(long long, int);
long long __ashldi3(long long, int);
long long __lshrdi3(long long, int);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
int __ucmpdi2(unsigned long long, unsigned long long);
EXPORT_SYMBOL(__ucmpdi2);
int __cmpdi2(long long, long long);
EXPORT_SYMBOL(__cmpdi2);
#endif
long long __bswapdi2(long long);
EXPORT_SYMBOL(__bswapdi2);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);

#if defined(CONFIG_FB_VGA16_MODULE)
EXPORT_SYMBOL(screen_info);
#endif

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(tb_ticks_per_jiffy);
EXPORT_SYMBOL(cacheable_memcpy);
EXPORT_SYMBOL(cacheable_memzero);
#endif

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(switch_mmu_context);
#endif

#ifdef CONFIG_PPC_STD_MMU_32
extern long mol_trampoline;
EXPORT_SYMBOL(mol_trampoline); /* For MOL */
EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
#ifdef CONFIG_SMP
extern int mmu_hash_lock;
EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
#endif /* CONFIG_SMP */
extern long *intercept_table;
EXPORT_SYMBOL(intercept_table);
#endif /* CONFIG_PPC_STD_MMU_32 */
#ifdef CONFIG_PPC_DCR_NATIVE
EXPORT_SYMBOL(__mtdcr);
EXPORT_SYMBOL(__mfdcr);
#endif
EXPORT_SYMBOL(empty_zero_page);

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(__arch_hweight8);
EXPORT_SYMBOL(__arch_hweight16);
EXPORT_SYMBOL(__arch_hweight32);
EXPORT_SYMBOL(__arch_hweight64);
#endif

#ifdef CONFIG_PPC_BOOK3S_64
EXPORT_SYMBOL_GPL(mmu_psize_defs);
#endif

#ifdef CONFIG_EPAPR_PARAVIRT
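For context: EXPORT_SYMBOL() is what makes the functions and variables listed above resolvable by loadable modules at insmod time. The sketch below is a hypothetical out-of-tree module (not part of this patch set; the module and identifier names are invented for illustration) that links against one of these exports, flush_icache_range(); without the export, loading it would fail with an unresolved-symbol error.

#include <linux/module.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

static unsigned char patch_area[64];

static int __init ksym_demo_init(void)
{
	/* Dummy stand-in for code generated into patch_area[]... */
	memset(patch_area, 0, sizeof(patch_area));
	/* ...then make the stores visible to the instruction cache. */
	flush_icache_range((unsigned long)patch_area,
			   (unsigned long)patch_area + sizeof(patch_area));
	return 0;
}

static void __exit ksym_demo_exit(void)
{
}

module_init(ksym_demo_init);
module_exit(ksym_demo_exit);
MODULE_LICENSE("GPL");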
arch/powerpc/kernel/ppc_ksyms_32.c (new file, 61 lines)
@@ -0,0 +1,61 @@
#include <linux/export.h>
#include <linux/smp.h>

#include <asm/page.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/time.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/dcr.h>

EXPORT_SYMBOL(clear_pages);
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);

#if defined(CONFIG_PCI)
EXPORT_SYMBOL(isa_io_base);
EXPORT_SYMBOL(isa_mem_base);
EXPORT_SYMBOL(pci_dram_offset);
#endif

#ifdef CONFIG_SMP
EXPORT_SYMBOL(smp_hw_index);
#endif

long long __ashrdi3(long long, int);
long long __ashldi3(long long, int);
long long __lshrdi3(long long, int);
int __ucmpdi2(unsigned long long, unsigned long long);
int __cmpdi2(long long, long long);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__ucmpdi2);
EXPORT_SYMBOL(__cmpdi2);

EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(tb_ticks_per_jiffy);

EXPORT_SYMBOL(switch_mmu_context);

#ifdef CONFIG_PPC_STD_MMU_32
extern long mol_trampoline;
EXPORT_SYMBOL(mol_trampoline); /* For MOL */
EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
#ifdef CONFIG_SMP
extern int mmu_hash_lock;
EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
#endif /* CONFIG_SMP */
extern long *intercept_table;
EXPORT_SYMBOL(intercept_table);
#endif /* CONFIG_PPC_STD_MMU_32 */

#ifdef CONFIG_PPC_DCR_NATIVE
EXPORT_SYMBOL(__mtdcr);
EXPORT_SYMBOL(__mfdcr);
#endif

EXPORT_SYMBOL(flush_instruction_cache);
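For context on the __ashrdi3/__ashldi3/__lshrdi3/__ucmpdi2/__cmpdi2 exports above: on 32-bit powerpc the compiler lowers some 64-bit integer operations to libgcc-style helper calls, so any module doing that arithmetic needs these symbols at load time. A hypothetical sketch (identifier invented for illustration, not from this patch set):

/*
 * Illustration only: with a variable shift count on a 64-bit value,
 * gcc on 32-bit powerpc typically emits a call to __ashrdi3 instead
 * of inline code, which is why the kernel exports that helper.
 */
long long demo_scale(long long sample, int shift)
{
	return sample >> shift;		/* may become a __ashrdi3 call on ppc32 */
}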
@@ -228,6 +228,7 @@ void giveup_vsx(struct task_struct *tsk)
giveup_altivec_maybe_transactional(tsk);
__giveup_vsx(tsk);
}
EXPORT_SYMBOL(giveup_vsx);

void flush_vsx_to_thread(struct task_struct *tsk)
{
@@ -1316,6 +1317,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
| PR_FP_EXC_RES | PR_FP_EXC_INV)
@@ -386,8 +386,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
return 0;
}

int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
int depth, void *data)
static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
const char *uname,
int depth, void *data)
{
const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */
@@ -641,6 +642,10 @@ void __init early_init_devtree(void *params)

DBG(" -> early_init_devtree(%p)\n", params);

/* Too early to BUG_ON(), do it by hand */
if (!early_init_dt_verify(params))
panic("BUG: Failed verifying flat device tree, bad version?");

/* Setup flat device-tree pointer */
initial_boot_params = params;

@@ -663,14 +668,12 @@ void __init early_init_devtree(void *params)
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
*/
of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line);
of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);

/* Scan memory nodes and rebuild MEMBLOCKs */
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);

/* Save command line for /proc/cmdline and then parse parameters */
strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
parse_early_param();

/* make sure we've parsed cmdline for mem= before this */
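For context on the of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line) call above: the scanner callback is invoked for every flat-device-tree node and receives the data pointer passed to of_scan_flat_dt(). The sketch below is a simplified, hypothetical stand-in (identifier invented; the real routine also handles initrd properties, option nodes and more) showing how a /chosen scanner copies "bootargs" into the buffer it is handed, which is why passing boot_command_line directly works.

#include <linux/of_fdt.h>
#include <linux/string.h>
#include <asm/setup.h>

/* Simplified stand-in, not the kernel's actual implementation. */
static int __init demo_scan_chosen(unsigned long node, const char *uname,
				   int depth, void *data)
{
	const char *args;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;			/* not /chosen, keep scanning */

	args = of_get_flat_dt_prop(node, "bootargs", NULL);
	if (args)
		strlcpy(data, args, COMMAND_LINE_SIZE);	/* data == boot_command_line */

	return 1;				/* found it, stop the scan */
}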
@@ -50,24 +50,14 @@ do
done

# ignore register save/restore funcitons
if [ "${UNDEF:0:9}" = "_restgpr_" ]; then
OK=1
fi
if [ "${UNDEF:0:10}" = "_restgpr0_" ]; then
OK=1
fi
if [ "${UNDEF:0:11}" = "_rest32gpr_" ]; then
OK=1
fi
if [ "${UNDEF:0:9}" = "_savegpr_" ]; then
OK=1
fi
if [ "${UNDEF:0:10}" = "_savegpr0_" ]; then
OK=1
fi
if [ "${UNDEF:0:11}" = "_save32gpr_" ]; then
OK=1
fi
case $UNDEF in
_restgpr_*|_restgpr0_*|_rest32gpr_*)
OK=1
;;
_savegpr_*|_savegpr0_*|_save32gpr_*)
OK=1
;;
esac

if [ $OK -eq 0 ]; then
ERROR=1
@@ -932,7 +932,7 @@ void ptrace_triggered(struct perf_event *bp,
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -286,7 +286,7 @@ static void prrn_work_fn(struct work_struct *work)

static DECLARE_WORK(prrn_work, prrn_work_fn);

void prrn_schedule_update(u32 scope)
static void prrn_schedule_update(u32 scope)
{
flush_work(&prrn_work);
prrn_update_scope = scope;
@@ -81,8 +81,6 @@ EXPORT_SYMBOL_GPL(boot_cpuid);

unsigned long klimit = (unsigned long) _end;

char cmd_line[COMMAND_LINE_SIZE];

/*
* This still seems to be needed... -- paulus
*/
@@ -94,6 +92,9 @@ struct screen_info screen_info = {
.orig_video_isVGA = 1,
.orig_video_points = 16
};
#if defined(CONFIG_FB_VGA16_MODULE)
EXPORT_SYMBOL(screen_info);
#endif

/* Variables required to store legacy IO irq routing */
int of_i8042_kbd_irq;
@@ -382,7 +383,7 @@ void __init check_for_initrd(void)
initrd_start = initrd_end = 0;

if (initrd_start)
printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
pr_info("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
@@ -268,7 +268,7 @@ static void __init exc_lvl_early_init(void)
/* Warning, IO base is not yet inited */
void __init setup_arch(char **cmdline_p)
{
*cmdline_p = cmd_line;
*cmdline_p = boot_command_line;

/* so udelay does something sensible, assume <= 1000 bogomips */
loops_per_jiffy = 500000000 / HZ;
@@ -525,21 +525,31 @@ void __init setup_system(void)
printk("Starting Linux PPC64 %s\n", init_utsname()->version);

printk("-----------------------------------------------------\n");
printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size());
printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
printk("phys_mem_size = 0x%llx\n", memblock_phys_mem_size());

if (ppc64_caches.dline_size != 0x80)
printk("ppc64_caches.dcache_line_size = 0x%x\n",
ppc64_caches.dline_size);
printk("dcache_line_size = 0x%x\n", ppc64_caches.dline_size);
if (ppc64_caches.iline_size != 0x80)
printk("ppc64_caches.icache_line_size = 0x%x\n",
ppc64_caches.iline_size);
printk("icache_line_size = 0x%x\n", ppc64_caches.iline_size);

printk("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
printk(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE);
printk(" always = 0x%016lx\n", CPU_FTRS_ALWAYS);
printk("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features,
cur_cpu_spec->cpu_user_features2);
printk("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
printk("firmware_features = 0x%016lx\n", powerpc_firmware_features);

#ifdef CONFIG_PPC_STD_MMU_64
if (htab_address)
printk("htab_address = 0x%p\n", htab_address);
printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
#endif /* CONFIG_PPC_STD_MMU_64 */
printk("htab_address = 0x%p\n", htab_address);

printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
#endif

if (PHYSICAL_START > 0)
printk("physical_start = 0x%llx\n",
printk("physical_start = 0x%llx\n",
(unsigned long long)PHYSICAL_START);
printk("-----------------------------------------------------\n");

@@ -657,7 +667,7 @@ void __init setup_arch(char **cmdline_p)
{
ppc64_boot_msg(0x12, "Setup Arch");

*cmdline_p = cmd_line;
*cmdline_p = boot_command_line;

/*
* Set cache line size based on type of cpu as a default.
@@ -52,6 +52,7 @@
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>

#ifdef DEBUG
#include <asm/udbg.h>
@@ -379,8 +380,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/*
* numa_node_id() works after this.
*/
set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
set_cpu_numa_mem(cpu, local_memory_node(numa_cpu_lookup_table[cpu]));
if (cpu_present(cpu)) {
set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
set_cpu_numa_mem(cpu,
local_memory_node(numa_cpu_lookup_table[cpu]));
}
}

cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
@@ -728,6 +732,9 @@ void start_secondary(void *unused)
}
traverse_core_siblings(cpu, true);

set_numa_node(numa_cpu_lookup_table[cpu]);
set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

smp_wmb();
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
@@ -479,7 +479,7 @@ void arch_irq_work_raise(void)

#endif /* CONFIG_IRQ_WORK */

void __timer_interrupt(void)
static void __timer_interrupt(void)
{
struct pt_regs *regs = get_irq_regs();
u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
@@ -643,7 +643,7 @@ static int __init get_freq(char *name, int cells, unsigned long *val)
return found;
}

void start_cpu_decrementer(void)
static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
/* Clear any pending timer interrupts */
@@ -1024,6 +1024,7 @@ void to_tm(int tim, struct rtc_time * tm)
*/
GregorianDay(tm);
}
EXPORT_SYMBOL(to_tm);

/*
* Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit