Merge tag 'please-pull-ia64_for_5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux

Pull ia64 updates from Tony Luck:
 "The big change here is removal of support for SGI Altix"

* tag 'please-pull-ia64_for_5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux: (33 commits)
  genirq: remove the is_affinity_mask_valid hook
  ia64: remove CONFIG_SWIOTLB ifdefs
  ia64: remove support for machvecs
  ia64: move the screen_info setup to common code
  ia64: move the ROOT_DEV setup to common code
  ia64: rework iommu probing
  ia64: remove the unused sn_coherency_id symbol
  ia64: remove the SGI UV simulator support
  ia64: remove the zx1 swiotlb machvec
  ia64: remove CONFIG_ACPI ifdefs
  ia64: remove CONFIG_PCI ifdefs
  ia64: remove the hpsim platform
  ia64: remove now unused machvec indirections
  ia64: remove support for the SGI SN2 platform
  drivers: remove the SGI SN2 IOC4 base support
  drivers: remove the SGI SN2 IOC3 base support
  qla2xxx: remove SGI SN2 support
  qla1280: remove SGI SN2 support
  misc/sgi-xp: remove SGI SN2 support
  char/mspec: remove SGI SN2 support
  ...
Committed by Linus Torvalds, 2019-09-16 15:32:01 -07:00
231 changed files with 349 additions and 40676 deletions

drivers/misc/Kconfig

@@ -126,18 +126,6 @@ config INTEL_MID_PTI
 	  an Intel Atom (non-netbook) mobile device containing a MIPI
 	  P1149.7 standard implementation.

-config SGI_IOC4
-	tristate "SGI IOC4 Base IO support"
-	depends on PCI
-	---help---
-	  This option enables basic support for the IOC4 chip on certain
-	  SGI IO controller cards (IO9, IO10, and PCI-RT).  This option
-	  does not enable any specific functions on such a card, but provides
-	  necessary infrastructure for other drivers to utilize.
-
-	  If you have an SGI Altix with an IOC4-based card say Y.
-	  Otherwise say N.
-
 config TIFM_CORE
 	tristate "TI Flash Media interface support"
 	depends on PCI
@@ -200,9 +188,8 @@ config ENCLOSURE_SERVICES
 config SGI_XP
 	tristate "Support communication between SGI SSIs"
 	depends on NET
-	depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_UV) && SMP
-	select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
-	select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+	depends on (IA64_SGI_UV || X86_UV) && SMP
 	depends on X86_64 || BROKEN
 	select SGI_GRU if X86_64 && SMP
 	---help---
 	  An SGI machine can be divided into multiple Single System

drivers/misc/Makefile

@@ -21,7 +21,6 @@ obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o
 obj-$(CONFIG_QCOM_FASTRPC)	+= fastrpc.o
 obj-$(CONFIG_SENSORS_BH1770)	+= bh1770glc.o
 obj-$(CONFIG_SENSORS_APDS990X)	+= apds990x.o
-obj-$(CONFIG_SGI_IOC4)		+= ioc4.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
 obj-$(CONFIG_SGI_XP)		+= sgi-xp/

drivers/misc/ioc4.c

@@ -1,498 +0,0 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2006 Silicon Graphics, Inc.  All Rights Reserved.
 */

/* This file contains the master driver module for use by SGI IOC4 subdrivers.
 *
 * It allocates any resources shared between multiple subdevices, and
 * provides accessor functions (where needed) and the like for those
 * resources.  It also provides a mechanism for the subdevice modules
 * to support loading and unloading.
 *
 * Non-shared resources (e.g. external interrupt A_INT_OUT register page
 * alias, serial port and UART registers) are handled by the subdevice
 * modules themselves.
 *
 * This is all necessary because IOC4 is not implemented as a multi-function
 * PCI device, but an amalgamation of disparate registers for several
 * types of device (ATA, serial, external interrupts).  The normal
 * resource management in the kernel doesn't have quite the right interfaces
 * to handle this situation (e.g. multiple modules can't claim the same
 * PCI ID), thus this IOC4 master module.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ioc4.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <asm/io.h>

/***************
 * Definitions *
 ***************/

/* Tweakable values */

/* PCI bus speed detection/calibration */
#define IOC4_CALIBRATE_COUNT 63		/* Calibration cycle period */
#define IOC4_CALIBRATE_CYCLES 256	/* Average over this many cycles */
#define IOC4_CALIBRATE_DISCARD 2	/* Discard first few cycles */
#define IOC4_CALIBRATE_LOW_MHZ 25	/* Lower bound on bus speed sanity */
#define IOC4_CALIBRATE_HIGH_MHZ 75	/* Upper bound on bus speed sanity */
#define IOC4_CALIBRATE_DEFAULT_MHZ 66	/* Assumed if sanity check fails */

/************************
 * Submodule management *
 ************************/

static DEFINE_MUTEX(ioc4_mutex);

static LIST_HEAD(ioc4_devices);
static LIST_HEAD(ioc4_submodules);
/* Register an IOC4 submodule */
int
ioc4_register_submodule(struct ioc4_submodule *is)
{
	struct ioc4_driver_data *idd;

	mutex_lock(&ioc4_mutex);
	list_add(&is->is_list, &ioc4_submodules);

	/* Initialize submodule for each IOC4 */
	if (!is->is_probe)
		goto out;

	list_for_each_entry(idd, &ioc4_devices, idd_list) {
		if (is->is_probe(idd)) {
			printk(KERN_WARNING
			       "%s: IOC4 submodule %s probe failed "
			       "for pci_dev %s.\n",
			       __func__, module_name(is->is_owner),
			       pci_name(idd->idd_pdev));
		}
	}
 out:
	mutex_unlock(&ioc4_mutex);
	return 0;
}

/* Unregister an IOC4 submodule */
void
ioc4_unregister_submodule(struct ioc4_submodule *is)
{
	struct ioc4_driver_data *idd;

	mutex_lock(&ioc4_mutex);
	list_del(&is->is_list);

	/* Remove submodule for each IOC4 */
	if (!is->is_remove)
		goto out;

	list_for_each_entry(idd, &ioc4_devices, idd_list) {
		if (is->is_remove(idd)) {
			printk(KERN_WARNING
			       "%s: IOC4 submodule %s remove failed "
			       "for pci_dev %s.\n",
			       __func__, module_name(is->is_owner),
			       pci_name(idd->idd_pdev));
		}
	}
 out:
	mutex_unlock(&ioc4_mutex);
}
/*********************
 * Device management *
 *********************/

#define IOC4_CALIBRATE_LOW_LIMIT \
	(1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_LOW_MHZ)
#define IOC4_CALIBRATE_HIGH_LIMIT \
	(1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_HIGH_MHZ)
#define IOC4_CALIBRATE_DEFAULT \
	(1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_DEFAULT_MHZ)

#define IOC4_CALIBRATE_END \
	(IOC4_CALIBRATE_CYCLES + IOC4_CALIBRATE_DISCARD)

#define IOC4_INT_OUT_MODE_TOGGLE 0x7	/* Toggle INT_OUT every COUNT+1 ticks */

/* Determines external interrupt output clock period of the PCI bus an
 * IOC4 is attached to.  This value can be used to determine the PCI
 * bus speed.
 *
 * IOC4 has a design feature that various internal timers are derived from
 * the PCI bus clock.  This causes IOC4 device drivers to need to take the
 * bus speed into account when setting various register values (e.g. INT_OUT
 * register COUNT field, UART divisors, etc).  Since this information is
 * needed by several subdrivers, it is determined by the main IOC4 driver,
 * even though the following code utilizes external interrupt registers
 * to perform the speed calculation.
 */
static void
ioc4_clock_calibrate(struct ioc4_driver_data *idd)
{
	union ioc4_int_out int_out;
	union ioc4_gpcr gpcr;
	unsigned int state, last_state;
	uint64_t start, end, period;
	unsigned int count;

	/* Enable output */
	gpcr.raw = 0;
	gpcr.fields.dir = IOC4_GPCR_DIR_0;
	gpcr.fields.int_out_en = 1;
	writel(gpcr.raw, &idd->idd_misc_regs->gpcr_s.raw);

	/* Reset to power-on state */
	writel(0, &idd->idd_misc_regs->int_out.raw);

	/* Set up square wave */
	int_out.raw = 0;
	int_out.fields.count = IOC4_CALIBRATE_COUNT;
	int_out.fields.mode = IOC4_INT_OUT_MODE_TOGGLE;
	int_out.fields.diag = 0;
	writel(int_out.raw, &idd->idd_misc_regs->int_out.raw);

	/* Check square wave period averaged over some number of cycles */
	start = ktime_get_ns();
	state = 1;	/* make sure the first read isn't a rising edge */
	for (count = 0; count <= IOC4_CALIBRATE_END; count++) {
		do { /* wait for a rising edge */
			last_state = state;
			int_out.raw = readl(&idd->idd_misc_regs->int_out.raw);
			state = int_out.fields.int_out;
		} while (last_state || !state);

		/* discard the first few cycles */
		if (count == IOC4_CALIBRATE_DISCARD)
			start = ktime_get_ns();
	}
	end = ktime_get_ns();

	/* Calculation rearranged to preserve intermediate precision.
	 * Logically:
	 * 1. "end - start" gives us the measurement period over all
	 *    the square wave cycles.
	 * 2. Divide by number of square wave cycles to get the period
	 *    of a square wave cycle.
	 * 3. Divide by 2*(int_out.fields.count+1), which is the formula
	 *    by which the IOC4 generates the square wave, to get the
	 *    period of an IOC4 INT_OUT count.
	 */
	period = (end - start) /
		(IOC4_CALIBRATE_CYCLES * 2 * (IOC4_CALIBRATE_COUNT + 1));

	/* Bounds check the result. */
	if (period > IOC4_CALIBRATE_LOW_LIMIT ||
	    period < IOC4_CALIBRATE_HIGH_LIMIT) {
		printk(KERN_INFO
		       "IOC4 %s: Clock calibration failed.  Assuming "
		       "PCI clock is %d ns.\n",
		       pci_name(idd->idd_pdev),
		       IOC4_CALIBRATE_DEFAULT / IOC4_EXTINT_COUNT_DIVISOR);
		period = IOC4_CALIBRATE_DEFAULT;
	} else {
		u64 ns = period;

		do_div(ns, IOC4_EXTINT_COUNT_DIVISOR);
		printk(KERN_DEBUG
		       "IOC4 %s: PCI clock is %llu ns.\n",
		       pci_name(idd->idd_pdev), (unsigned long long)ns);
	}

	/* Remember results.  We store the extint clock period rather
	 * than the PCI clock period so that greater precision is
	 * retained.  Divide by IOC4_EXTINT_COUNT_DIVISOR to get
	 * PCI clock period.
	 */
	idd->count_period = period;
}
/* There are three variants of IOC4 cards: IO9, IO10, and PCI-RT.
 * Each brings out different combinations of IOC4 signals, thus
 * the IOC4 subdrivers need to know to which we're attached.
 *
 * We look for the presence of a SCSI (IO9) or SATA (IO10) controller
 * on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
 * If neither is present, it's a PCI-RT.
 */
static unsigned int
ioc4_variant(struct ioc4_driver_data *idd)
{
	struct pci_dev *pdev = NULL;
	int found = 0;

	/* IO9: Look for a QLogic ISP 12160 at the same bus and slot 3. */
	do {
		pdev = pci_get_device(PCI_VENDOR_ID_QLOGIC,
				      PCI_DEVICE_ID_QLOGIC_ISP12160, pdev);
		if (pdev &&
		    idd->idd_pdev->bus->number == pdev->bus->number &&
		    3 == PCI_SLOT(pdev->devfn))
			found = 1;
	} while (pdev && !found);
	if (NULL != pdev) {
		pci_dev_put(pdev);
		return IOC4_VARIANT_IO9;
	}

	/* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */
	pdev = NULL;
	do {
		pdev = pci_get_device(PCI_VENDOR_ID_VITESSE,
				      PCI_DEVICE_ID_VITESSE_VSC7174, pdev);
		if (pdev &&
		    idd->idd_pdev->bus->number == pdev->bus->number &&
		    3 == PCI_SLOT(pdev->devfn))
			found = 1;
	} while (pdev && !found);
	if (NULL != pdev) {
		pci_dev_put(pdev);
		return IOC4_VARIANT_IO10;
	}

	/* PCI-RT: No SCSI/SATA controller will be present */
	return IOC4_VARIANT_PCI_RT;
}

static void
ioc4_load_modules(struct work_struct *work)
{
	request_module("sgiioc4");
}

static DECLARE_WORK(ioc4_load_modules_work, ioc4_load_modules);
/* Adds a new instance of an IOC4 card */
static int
ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
{
	struct ioc4_driver_data *idd;
	struct ioc4_submodule *is;
	uint32_t pcmd;
	int ret;

	/* Enable IOC4 and take ownership of it */
	if ((ret = pci_enable_device(pdev))) {
		printk(KERN_WARNING
		       "%s: Failed to enable IOC4 device for pci_dev %s.\n",
		       __func__, pci_name(pdev));
		goto out;
	}
	pci_set_master(pdev);

	/* Set up per-IOC4 data */
	idd = kmalloc(sizeof(struct ioc4_driver_data), GFP_KERNEL);
	if (!idd) {
		printk(KERN_WARNING
		       "%s: Failed to allocate IOC4 data for pci_dev %s.\n",
		       __func__, pci_name(pdev));
		ret = -ENODEV;
		goto out_idd;
	}
	idd->idd_pdev = pdev;
	idd->idd_pci_id = pci_id;

	/* Map IOC4 misc registers.  These are shared between subdevices
	 * so the main IOC4 module manages them.
	 */
	idd->idd_bar0 = pci_resource_start(idd->idd_pdev, 0);
	if (!idd->idd_bar0) {
		printk(KERN_WARNING
		       "%s: Unable to find IOC4 misc resource "
		       "for pci_dev %s.\n",
		       __func__, pci_name(idd->idd_pdev));
		ret = -ENODEV;
		goto out_pci;
	}
	if (!request_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs),
				"ioc4_misc")) {
		printk(KERN_WARNING
		       "%s: Unable to request IOC4 misc region "
		       "for pci_dev %s.\n",
		       __func__, pci_name(idd->idd_pdev));
		ret = -ENODEV;
		goto out_pci;
	}
	idd->idd_misc_regs = ioremap(idd->idd_bar0,
				     sizeof(struct ioc4_misc_regs));
	if (!idd->idd_misc_regs) {
		printk(KERN_WARNING
		       "%s: Unable to remap IOC4 misc region "
		       "for pci_dev %s.\n",
		       __func__, pci_name(idd->idd_pdev));
		ret = -ENODEV;
		goto out_misc_region;
	}

	/* Failsafe portion of per-IOC4 initialization */

	/* Detect card variant */
	idd->idd_variant = ioc4_variant(idd);
	printk(KERN_INFO "IOC4 %s: %s card detected.\n", pci_name(pdev),
	       idd->idd_variant == IOC4_VARIANT_IO9 ? "IO9" :
	       idd->idd_variant == IOC4_VARIANT_PCI_RT ? "PCI-RT" :
	       idd->idd_variant == IOC4_VARIANT_IO10 ? "IO10" : "unknown");

	/* Initialize IOC4 */
	pci_read_config_dword(idd->idd_pdev, PCI_COMMAND, &pcmd);
	pci_write_config_dword(idd->idd_pdev, PCI_COMMAND,
			       pcmd | PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	/* Determine PCI clock */
	ioc4_clock_calibrate(idd);

	/* Disable/clear all interrupts.  Need to do this here lest
	 * one submodule request the shared IOC4 IRQ, but interrupt
	 * is generated by a different subdevice.
	 */
	/* Disable */
	writel(~0, &idd->idd_misc_regs->other_iec.raw);
	writel(~0, &idd->idd_misc_regs->sio_iec);
	/* Clear (i.e. acknowledge) */
	writel(~0, &idd->idd_misc_regs->other_ir.raw);
	writel(~0, &idd->idd_misc_regs->sio_ir);

	/* Track PCI-device specific data */
	idd->idd_serial_data = NULL;
	pci_set_drvdata(idd->idd_pdev, idd);

	mutex_lock(&ioc4_mutex);
	list_add_tail(&idd->idd_list, &ioc4_devices);

	/* Add this IOC4 to all submodules */
	list_for_each_entry(is, &ioc4_submodules, is_list) {
		if (is->is_probe && is->is_probe(idd)) {
			printk(KERN_WARNING
			       "%s: IOC4 submodule %s probe failed "
			       "for pci_dev %s.\n",
			       __func__, module_name(is->is_owner),
			       pci_name(idd->idd_pdev));
		}
	}
	mutex_unlock(&ioc4_mutex);

	/* Request sgiioc4 IDE driver on boards that bring that functionality
	 * off of IOC4.  The root filesystem may be hosted on a drive connected
	 * to IOC4, so we need to make sure the sgiioc4 driver is loaded as it
	 * won't be picked up by modprobes due to the ioc4 module owning the
	 * PCI device.
	 */
	if (idd->idd_variant != IOC4_VARIANT_PCI_RT) {
		/* Request the module from a work procedure as the modprobe
		 * goes out to a userland helper and that will hang if done
		 * directly from ioc4_probe().
		 */
		printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n");
		schedule_work(&ioc4_load_modules_work);
	}

	return 0;

out_misc_region:
	release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
out_pci:
	kfree(idd);
out_idd:
	pci_disable_device(pdev);
out:
	return ret;
}
/* Removes a particular instance of an IOC4 card. */
static void
ioc4_remove(struct pci_dev *pdev)
{
	struct ioc4_submodule *is;
	struct ioc4_driver_data *idd;

	idd = pci_get_drvdata(pdev);

	/* Remove this IOC4 from all submodules */
	mutex_lock(&ioc4_mutex);
	list_for_each_entry(is, &ioc4_submodules, is_list) {
		if (is->is_remove && is->is_remove(idd)) {
			printk(KERN_WARNING
			       "%s: IOC4 submodule %s remove failed "
			       "for pci_dev %s.\n",
			       __func__, module_name(is->is_owner),
			       pci_name(idd->idd_pdev));
		}
	}
	mutex_unlock(&ioc4_mutex);

	/* Release resources */
	iounmap(idd->idd_misc_regs);
	if (!idd->idd_bar0) {
		printk(KERN_WARNING
		       "%s: Unable to get IOC4 misc mapping for pci_dev %s. "
		       "Device removal may be incomplete.\n",
		       __func__, pci_name(idd->idd_pdev));
	}
	release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));

	/* Disable IOC4 and relinquish */
	pci_disable_device(pdev);

	/* Remove and free driver data */
	mutex_lock(&ioc4_mutex);
	list_del(&idd->idd_list);
	mutex_unlock(&ioc4_mutex);
	kfree(idd);
}
static const struct pci_device_id ioc4_id_table[] = {
	{PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC4, PCI_ANY_ID,
	 PCI_ANY_ID, 0x0b4000, 0xFFFFFF},
	{0}
};

static struct pci_driver ioc4_driver = {
	.name = "IOC4",
	.id_table = ioc4_id_table,
	.probe = ioc4_probe,
	.remove = ioc4_remove,
};

MODULE_DEVICE_TABLE(pci, ioc4_id_table);

/*********************
 * Module management *
 *********************/

/* Module load */
static int __init
ioc4_init(void)
{
	return pci_register_driver(&ioc4_driver);
}

/* Module unload */
static void __exit
ioc4_exit(void)
{
	/* Ensure ioc4_load_modules() has completed before exiting */
	flush_work(&ioc4_load_modules_work);
	pci_unregister_driver(&ioc4_driver);
}

module_init(ioc4_init);
module_exit(ioc4_exit);

MODULE_AUTHOR("Brent Casavant - Silicon Graphics, Inc. <bcasavan@sgi.com>");
MODULE_DESCRIPTION("PCI driver master module for SGI IOC4 Base-IO Card");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(ioc4_register_submodule);
EXPORT_SYMBOL(ioc4_unregister_submodule);
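To make the removed API concrete, here is a minimal sketch (not code from this commit) of how a subdriver such as the sgiioc4 IDE driver would have hooked into this master module. The callback bodies and the submodule name are hypothetical; the struct ioc4_submodule field names follow their use in ioc4.c above.

#include <linux/module.h>
#include <linux/ioc4.h>

/* Called once per IOC4 card: for cards already discovered at
 * registration time and for cards probed later. */
static int example_probe(struct ioc4_driver_data *idd)
{
	/* idd->idd_misc_regs is already mapped by the master module */
	return 0;
}

static int example_remove(struct ioc4_driver_data *idd)
{
	return 0;
}

static struct ioc4_submodule example_submodule = {
	.is_name = "ioc4_example",	/* hypothetical */
	.is_owner = THIS_MODULE,
	.is_probe = example_probe,
	.is_remove = example_remove,
};

static int __init example_init(void)
{
	return ioc4_register_submodule(&example_submodule);
}

static void __exit example_exit(void)
{
	ioc4_unregister_submodule(&example_submodule);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");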

drivers/misc/sgi-xp/Makefile

@@ -4,17 +4,10 @@
#
 obj-$(CONFIG_SGI_XP)		+= xp.o
-xp-y				:= xp_main.o
-xp-$(CONFIG_IA64_SGI_SN2)	+= xp_sn2.o xp_nofault.o
-xp-$(CONFIG_IA64_GENERIC)	+= xp_sn2.o xp_nofault.o
-xp-$(CONFIG_IA64_SGI_UV)	+= xp_uv.o
-xp-$(CONFIG_X86_64)		+= xp_uv.o
+xp-y				:= xp_main.o xp_uv.o

 obj-$(CONFIG_SGI_XP)		+= xpc.o
-xpc-y				:= xpc_main.o xpc_channel.o xpc_partition.o
-xpc-$(CONFIG_IA64_SGI_SN2)	+= xpc_sn2.o
-xpc-$(CONFIG_IA64_GENERIC)	+= xpc_sn2.o
-xpc-$(CONFIG_IA64_SGI_UV)	+= xpc_uv.o
-xpc-$(CONFIG_X86_64)		+= xpc_uv.o
+xpc-y				:= xpc_main.o xpc_channel.o xpc_partition.o \
+				   xpc_uv.o

 obj-$(CONFIG_SGI_XP)		+= xpnet.o

drivers/misc/sgi-xp/xp.h

@@ -24,23 +24,6 @@
 #define is_uv()		0
 #endif

-#if defined CONFIG_IA64
-#include <asm/sn/arch.h>	/* defines is_shub1() and is_shub2() */
-#define is_shub()	ia64_platform_is("sn2")
-#endif
-
-#ifndef is_shub1
-#define is_shub1()	0
-#endif
-
-#ifndef is_shub2
-#define is_shub2()	0
-#endif
-
-#ifndef is_shub
-#define is_shub()	0
-#endif
-
 #ifdef USE_DBUG_ON
 #define DBUG_ON(condition)	BUG_ON(condition)
 #else
@@ -360,9 +343,7 @@ extern int xp_nofault_PIOR(void *);
 extern int xp_error_PIOR(void);

 extern struct device *xp;

-extern enum xp_retval xp_init_sn2(void);
 extern enum xp_retval xp_init_uv(void);
-extern void xp_exit_sn2(void);
 extern void xp_exit_uv(void);

 #endif /* _DRIVERS_MISC_SGIXP_XP_H */
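The deleted block above is an instance of a common kernel pattern: a platform predicate defaults to the constant 0 when that platform isn't configured, so common code can test it unconditionally and the compiler discards the dead branch. A standalone sketch of the idea (names and strings hypothetical):

#include <stdio.h>

/* When no UV support is configured, the predicate collapses to 0 and
 * the branch below is compiled out -- exactly how the is_shub()/is_uv()
 * fallbacks worked in xp.h. */
#ifndef is_uv
#define is_uv() 0
#endif

int main(void)
{
	if (is_uv())
		puts("initializing UV transport");
	else
		puts("platform not supported");
	return 0;
}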

drivers/misc/sgi-xp/xp_main.c

@@ -233,9 +233,7 @@ xp_init(void)
 	for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
 		mutex_init(&xpc_registrations[ch_number].mutex);

-	if (is_shub())
-		ret = xp_init_sn2();
-	else if (is_uv())
+	if (is_uv())
 		ret = xp_init_uv();
 	else
 		ret = 0;
@@ -251,9 +249,7 @@ module_init(xp_init);
 void __exit
 xp_exit(void)
 {
-	if (is_shub())
-		xp_exit_sn2();
-	else if (is_uv())
+	if (is_uv())
 		xp_exit_uv();
 }

drivers/misc/sgi-xp/xp_nofault.S

@@ -1,35 +0,0 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * The xp_nofault_PIOR function takes a pointer to a remote PIO register
 * and attempts to load and consume a value from it.  This function
 * will be registered as a nofault code block.  In the event that the
 * PIO read fails, the MCA handler will force the error to look
 * corrected and vector to the xp_error_PIOR which will return an error.
 *
 * The definition of "consumption" and the time it takes for an MCA
 * to surface is processor implementation specific.  This code
 * is sufficient on Itanium through the Montvale processor family.
 * It may need to be adjusted for future processor implementations.
 *
 *	extern int xp_nofault_PIOR(void *remote_register);
 */

	.global xp_nofault_PIOR
xp_nofault_PIOR:
	mov	r8=r0			// Stage a success return value
	ld8.acq	r9=[r32];;		// PIO Read the specified register
	adds	r9=1,r9;;		// Add to force consumption
	srlz.i;;			// Allow time for MCA to surface
	br.ret.sptk.many b0;;		// Return success

	.global xp_error_PIOR
xp_error_PIOR:
	mov	r8=1			// Return value of 1
	br.ret.sptk.many b0;;		// Return failure
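For orientation, an editor's sketch (not code from this commit) of how a caller would use the interface above: hand xp_nofault_PIOR the address of a remote PIO register; a zero return means the read completed, nonzero means the MCA handler vectored to xp_error_PIOR because the target could not be reached.

/* hypothetical helper built on the nofault read */
static int example_remote_reachable(u64 *remote_pio_reg)
{
	extern int xp_nofault_PIOR(void *);

	return xp_nofault_PIOR(remote_pio_reg) == 0;
}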

drivers/misc/sgi-xp/xp_sn2.c

@@ -1,190 +0,0 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition (XP) sn2-based functions.
 *
 * Architecture specific implementation of common functions.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <asm/sn/bte.h>
#include <asm/sn/sn_sal.h>
#include "xp.h"

/*
 * The export of xp_nofault_PIOR needs to happen here since it is defined
 * in drivers/misc/sgi-xp/xp_nofault.S.  The target of the nofault read is
 * defined here.
 */
EXPORT_SYMBOL_GPL(xp_nofault_PIOR);

u64 xp_nofault_PIOR_target;
EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
/*
 * Register a nofault code region which performs a cross-partition PIO read.
 * If the PIO read times out, the MCA handler will consume the error and
 * return to a kernel-provided instruction to indicate an error.  This PIO read
 * exists because it is guaranteed to timeout if the destination is down
 * (amo operations do not timeout on at least some CPUs on Shubs <= v1.2,
 * which unfortunately we have to work around).
 */
static enum xp_retval
xp_register_nofault_code_sn2(void)
{
	int ret;
	u64 func_addr;
	u64 err_func_addr;

	/* On ia64 a function pointer is a function descriptor; its first
	 * word holds the actual entry address, which is what SAL wants. */
	func_addr = *(u64 *)xp_nofault_PIOR;
	err_func_addr = *(u64 *)xp_error_PIOR;
	ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
				       1, 1);
	if (ret != 0) {
		dev_err(xp, "can't register nofault code, error=%d\n", ret);
		return xpSalError;
	}
	/*
	 * Setup the nofault PIO read target.  (There is no special reason why
	 * SH_IPI_ACCESS was selected.)
	 */
	if (is_shub1())
		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
	else if (is_shub2())
		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;

	return xpSuccess;
}

static void
xp_unregister_nofault_code_sn2(void)
{
	u64 func_addr = *(u64 *)xp_nofault_PIOR;
	u64 err_func_addr = *(u64 *)xp_error_PIOR;

	/* unregister the PIO read nofault code region */
	(void)sn_register_nofault_code(func_addr, err_func_addr,
				       err_func_addr, 1, 0);
}
/*
 * Convert a virtual memory address to a physical memory address.
 */
static unsigned long
xp_pa_sn2(void *addr)
{
	return __pa(addr);
}

/*
 * Convert a global physical to a socket physical address.
 */
static unsigned long
xp_socket_pa_sn2(unsigned long gpa)
{
	return gpa;
}

/*
 * Wrapper for bte_copy().
 *
 *	dst_pa - physical address of the destination of the transfer.
 *	src_pa - physical address of the source of the transfer.
 *	len - number of bytes to transfer from source to destination.
 *
 * Note: xp_remote_memcpy_sn2() should never be called while holding a spinlock.
 */
static enum xp_retval
xp_remote_memcpy_sn2(unsigned long dst_pa, const unsigned long src_pa,
		     size_t len)
{
	bte_result_t ret;

	ret = bte_copy(src_pa, dst_pa, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
	if (ret == BTE_SUCCESS)
		return xpSuccess;

	if (is_shub2()) {
		dev_err(xp, "bte_copy() on shub2 failed, error=0x%x dst_pa="
			"0x%016lx src_pa=0x%016lx len=%ld\n", ret, dst_pa,
			src_pa, len);
	} else {
		dev_err(xp, "bte_copy() failed, error=%d dst_pa=0x%016lx "
			"src_pa=0x%016lx len=%ld\n", ret, dst_pa, src_pa, len);
	}

	return xpBteCopyError;
}

static int
xp_cpu_to_nasid_sn2(int cpuid)
{
	return cpuid_to_nasid(cpuid);
}
static enum xp_retval
xp_expand_memprotect_sn2(unsigned long phys_addr, unsigned long size)
{
	u64 nasid_array = 0;
	int ret;

	ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
				   &nasid_array);
	if (ret != 0) {
		dev_err(xp, "sn_change_memprotect(,, "
			"SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
		return xpSalError;
	}
	return xpSuccess;
}

static enum xp_retval
xp_restrict_memprotect_sn2(unsigned long phys_addr, unsigned long size)
{
	u64 nasid_array = 0;
	int ret;

	ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
				   &nasid_array);
	if (ret != 0) {
		dev_err(xp, "sn_change_memprotect(,, "
			"SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
		return xpSalError;
	}
	return xpSuccess;
}

enum xp_retval
xp_init_sn2(void)
{
	BUG_ON(!is_shub());

	xp_max_npartitions = XP_MAX_NPARTITIONS_SN2;
	xp_partition_id = sn_partition_id;
	xp_region_size = sn_region_size;

	xp_pa = xp_pa_sn2;
	xp_socket_pa = xp_socket_pa_sn2;
	xp_remote_memcpy = xp_remote_memcpy_sn2;
	xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
	xp_expand_memprotect = xp_expand_memprotect_sn2;
	xp_restrict_memprotect = xp_restrict_memprotect_sn2;

	return xp_register_nofault_code_sn2();
}

void
xp_exit_sn2(void)
{
	BUG_ON(!is_shub());

	xp_unregister_nofault_code_sn2();
}
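xp_init_sn2() above also shows how the xp layer dispatches: common code calls through writable function pointers (xp_pa, xp_remote_memcpy, ...) that each platform's init routine binds once at startup. A self-contained illustration of that pattern, with hypothetical names:

#include <stdio.h>

static unsigned long (*xp_pa_hook)(void *addr);	/* hypothetical hook */

static unsigned long xp_pa_flat(void *addr)
{
	/* stand-in for __pa(): identity translation */
	return (unsigned long)addr;
}

static void xp_init_flat(void)
{
	/* platform init selects the implementation, exactly once */
	xp_pa_hook = xp_pa_flat;
}

int main(void)
{
	int x = 0;

	xp_init_flat();
	printf("phys=%#lx\n", xp_pa_hook(&x));
	return x;
}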

drivers/misc/sgi-xp/xp_uv.c

@@ -17,7 +17,7 @@
 #include <asm/uv/uv_hub.h>
 #if defined CONFIG_X86_64
 #include <asm/uv/bios.h>
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
 #include <asm/sn/sn_sal.h>
 #endif
 #include "../sgi-gru/grukservices.h"
@@ -99,7 +99,7 @@ xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size)
 		return xpBiosError;
 	}
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
 	u64 nasid_array;

 	ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
@@ -129,7 +129,7 @@ xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size)
 		return xpBiosError;
 	}
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
 	u64 nasid_array;

 	ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
@@ -151,9 +151,10 @@ xp_init_uv(void)
 	BUG_ON(!is_uv());

 	xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
+#ifdef CONFIG_X86
 	xp_partition_id = sn_partition_id;
 	xp_region_size = sn_region_size;
+#endif

 	xp_pa = xp_pa_uv;
 	xp_socket_pa = xp_socket_pa_uv;
 	xp_remote_memcpy = xp_remote_memcpy_uv;

drivers/misc/sgi-xp/xpc.h

@@ -71,14 +71,10 @@
* 'SAL_nasids_size'. (Local partition's mask pointers are xpc_part_nasids
* and xpc_mach_nasids.)
*
* vars (ia64-sn2 only)
* vars part (ia64-sn2 only)
*
* Immediately following the mach_nasids mask are the XPC variables
* required by other partitions. First are those that are generic to all
* partitions (vars), followed on the next available cacheline by those
* which are partition specific (vars part). These are setup by XPC.
* (Local partition's vars pointers are xpc_vars and xpc_vars_part.)
*
* Note: Until 'ts_jiffies' is set non-zero, the partition XPC code has not been
* initialized.
@@ -92,9 +88,6 @@ struct xpc_rsvd_page {
 	u8 pad1[3];		/* align to next u64 in 1st 64-byte cacheline */
 	unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
 	union {
-		struct {
-			unsigned long vars_pa;	/* phys addr */
-		} sn2;
 		struct {
 			unsigned long heartbeat_gpa; /* phys addr */
 			unsigned long activate_gru_mq_desc_gpa; /* phys addr */
@@ -106,84 +99,14 @@ struct xpc_rsvd_page {
#define XPC_RP_VERSION _XPC_VERSION(3, 0) /* version 3.0 of the reserved page */
/*
* Define the structures by which XPC variables can be exported to other
* partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
*/
/*
* The following structure describes the partition generic variables
* needed by other partitions in order to properly initialize.
*
* struct xpc_vars version number also applies to struct xpc_vars_part.
* Changes to either structure and/or related functionality should be
* reflected by incrementing either the major or minor version numbers
* of struct xpc_vars.
*/
struct xpc_vars_sn2 {
	u8 version;
	u64 heartbeat;
	DECLARE_BITMAP(heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
	u64 heartbeat_offline;	/* if 0, heartbeat should be changing */
	int activate_IRQ_nasid;
	int activate_IRQ_phys_cpuid;
	unsigned long vars_part_pa;
	unsigned long amos_page_pa; /* paddr of page of amos from MSPEC driver */
	struct amo *amos_page;	/* vaddr of page of amos from MSPEC driver */
};
#define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */
/*
* The following structure describes the per partition specific variables.
*
* An array of these structures, one per partition, will be defined. As a
* partition becomes active XPC will copy the array entry corresponding to
* itself from that partition. It is desirable that the size of this structure
* evenly divides into a 128-byte cacheline, such that none of the entries in
* this array crosses a 128-byte cacheline boundary. As it is now, each entry
* occupies 64-bytes.
*/
struct xpc_vars_part_sn2 {
	u64 magic;

	unsigned long openclose_args_pa; /* phys addr of open and close args */
	unsigned long GPs_pa;	/* physical address of Get/Put values */

	unsigned long chctl_amo_pa; /* physical address of chctl flags' amo */

	int notify_IRQ_nasid;	/* nasid of where to send notify IRQs */
	int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */

	u8 nchannels;		/* #of defined channels supported */

	u8 reserved[23];	/* pad to a full 64 bytes */
};
/*
* The vars_part MAGIC numbers play a part in the first contact protocol.
*
* MAGIC1 indicates that the per partition specific variables for a remote
* partition have been initialized by this partition.
*
* MAGIC2 indicates that this partition has pulled the remote partition's
* per partition variables that pertain to this partition.
*/
#define XPC_VP_MAGIC1_SN2 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
#define XPC_VP_MAGIC2_SN2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
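/*
 * (Editor's note, not in the original header.)  Decoding the constants
 * byte by byte, little endian (low byte first in memory):
 *   0x0053524156435058 -> 58 50 43 56 41 52 53 00 -> "XPCVARS\0"
 *   0x0073726176435058 -> 58 50 43 76 61 72 73 00 -> "XPCvars\0"
 * so the two handshake stages described above differ only in the case
 * of the "vars" letters.
 */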
/* the reserved page sizes and offsets */
#define XPC_RP_HEADER_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
#define XPC_RP_VARS_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_vars_sn2))

#define XPC_RP_PART_NASIDS(_rp) ((unsigned long *)((u8 *)(_rp) + \
				 XPC_RP_HEADER_SIZE))
#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + \
				 xpc_nasid_mask_nlongs)
#define XPC_RP_VARS(_rp)	((struct xpc_vars_sn2 *) \
				 (XPC_RP_MACH_NASIDS(_rp) + \
				  xpc_nasid_mask_nlongs))
/*
@@ -297,17 +220,6 @@ struct xpc_activate_mq_msg_chctl_opencomplete_uv {
#define XPC_UNPACK_ARG1(_args) (((u64)_args) & 0xffffffff)
#define XPC_UNPACK_ARG2(_args) ((((u64)_args) >> 32) & 0xffffffff)
/*
* Define a Get/Put value pair (pointers) used with a message queue.
*/
struct xpc_gp_sn2 {
	s64 get;	/* Get value */
	s64 put;	/* Put value */
};

#define XPC_GP_SIZE \
		L1_CACHE_ALIGN(sizeof(struct xpc_gp_sn2) * XPC_MAX_NCHANNELS)
/*
* Define a structure that contains arguments associated with opening and
* closing a channel.
@@ -340,30 +252,6 @@ struct xpc_fifo_head_uv {
int n_entries;
};
/*
* Define a sn2 styled message.
*
* A user-defined message resides in the payload area. The max size of the
* payload is defined by the user via xpc_connect().
*
* The size of a message entry (within a message queue) must be a 128-byte
* cacheline sized multiple in order to facilitate the BTE transfer of messages
* from one message queue to another.
*/
struct xpc_msg_sn2 {
	u8 flags;		/* FOR XPC INTERNAL USE ONLY */
	u8 reserved[7];		/* FOR XPC INTERNAL USE ONLY */
	s64 number;		/* FOR XPC INTERNAL USE ONLY */

	u64 payload;		/* user defined portion of message */
};

/* struct xpc_msg_sn2 flags */

#define	XPC_M_SN2_DONE		0x01	/* msg has been received/consumed */
#define	XPC_M_SN2_READY		0x02	/* msg is ready to be sent */
#define	XPC_M_SN2_INTERRUPT	0x04	/* send interrupt when msg consumed */
/*
* The format of a uv XPC notify_mq GRU message is as follows:
*
@@ -390,20 +278,6 @@ struct xpc_notify_mq_msg_uv {
unsigned long payload;
};
/*
* Define sn2's notify entry.
*
* This is used to notify a message's sender that their message was received
* and consumed by the intended recipient.
*/
struct xpc_notify_sn2 {
	u8 type;		/* type of notification */

	/* the following two fields are only used if type == XPC_N_CALL */
	xpc_notify_func func;	/* user's notify function */
	void *key;		/* pointer to user's key */
};

/* struct xpc_notify_sn2 type of notification */

#define	XPC_N_CALL	0x01	/* notify function provided by user */
@@ -431,102 +305,6 @@ struct xpc_send_msg_slot_uv {
* of these structures for each potential channel connection to that partition.
*/
/*
* The following is sn2 only.
*
* Each channel structure manages two message queues (circular buffers).
* They are allocated at the time a channel connection is made. One of
* these message queues (local_msgqueue) holds the locally created messages
* that are destined for the remote partition. The other of these message
* queues (remote_msgqueue) is a locally cached copy of the remote partition's
* own local_msgqueue.
*
* The following is a description of the Get/Put pointers used to manage these
* two message queues. Consider the local_msgqueue to be on one partition
* and the remote_msgqueue to be its cached copy on another partition. A
* description of what each of the lettered areas contains is included.
*
*
* local_msgqueue remote_msgqueue
*
* |/////////| |/////////|
* w_remote_GP.get --> +---------+ |/////////|
* | F | |/////////|
* remote_GP.get --> +---------+ +---------+ <-- local_GP->get
* | | | |
* | | | E |
* | | | |
* | | +---------+ <-- w_local_GP.get
* | B | |/////////|
* | | |////D////|
* | | |/////////|
* | | +---------+ <-- w_remote_GP.put
* | | |////C////|
* local_GP->put --> +---------+ +---------+ <-- remote_GP.put
* | | |/////////|
* | A | |/////////|
* | | |/////////|
* w_local_GP.put --> +---------+ |/////////|
* |/////////| |/////////|
*
*
* ( remote_GP.[get|put] are cached copies of the remote
* partition's local_GP->[get|put], and thus their values can
* lag behind their counterparts on the remote partition. )
*
*
* A - Messages that have been allocated, but have not yet been sent to the
* remote partition.
*
* B - Messages that have been sent, but have not yet been acknowledged by the
* remote partition as having been received.
*
* C - Area that needs to be prepared for the copying of sent messages, by
* the clearing of the message flags of any previously received messages.
*
* D - Area into which sent messages are to be copied from the remote
* partition's local_msgqueue and then delivered to their intended
* recipients. [ To allow for a multi-message copy, another pointer
* (next_msg_to_pull) has been added to keep track of the next message
* number needing to be copied (pulled). It chases after w_remote_GP.put.
* Any messages lying between w_local_GP.get and next_msg_to_pull have
* been copied and are ready to be delivered. ]
*
* E - Messages that have been copied and delivered, but have not yet been
* acknowledged by the recipient as having been received.
*
* F - Messages that have been acknowledged, but XPC has not yet notified the
* sender that the message was received by its intended recipient.
* This is also an area that needs to be prepared for the allocating of
* new messages, by the clearing of the message flags of the acknowledged
* messages.
*/
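/*
 * (Editor's illustration, not in the original header.)  All of these
 * Get/Put values are free-running s64 counters, so within the life of a
 * connection they never wrap; a counter modulo the number of queue
 * entries gives a slot index, and plain differences give the sizes of
 * the lettered regions above.  For example, the messages in regions A+B
 * (allocated or sent but not yet acknowledged) number
 * w_local_GP.put - remote_GP.get, and the messages still waiting to be
 * pulled number w_remote_GP.put - next_msg_to_pull.
 */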
struct xpc_channel_sn2 {
	struct xpc_openclose_args *local_openclose_args; /* args passed on */
					     /* opening or closing of channel */

	void *local_msgqueue_base;	/* base address of kmalloc'd space */
	struct xpc_msg_sn2 *local_msgqueue; /* local message queue */
	void *remote_msgqueue_base;	/* base address of kmalloc'd space */
	struct xpc_msg_sn2 *remote_msgqueue; /* cached copy of remote */
					  /* partition's local message queue */
	unsigned long remote_msgqueue_pa; /* phys addr of remote partition's */
					  /* local message queue */

	struct xpc_notify_sn2 *notify_queue; /* notify queue for messages sent */

	/* various flavors of local and remote Get/Put values */

	struct xpc_gp_sn2 *local_GP;	/* local Get/Put values */
	struct xpc_gp_sn2 remote_GP;	/* remote Get/Put values */
	struct xpc_gp_sn2 w_local_GP;	/* working local Get/Put values */
	struct xpc_gp_sn2 w_remote_GP;	/* working remote Get/Put values */
	s64 next_msg_to_pull;	/* Put value of next msg to pull */

	struct mutex msg_to_pull_mutex;	/* next msg to pull serialization */
};
struct xpc_channel_uv {
void *cached_notify_gru_mq_desc; /* remote partition's notify mq's */
/* gru mq descriptor */
@@ -579,7 +357,6 @@ struct xpc_channel {
 	wait_queue_head_t idle_wq;	/* idle kthread wait queue */

 	union {
-		struct xpc_channel_sn2 sn2;
 		struct xpc_channel_uv uv;
 	} sn;
@@ -666,43 +443,6 @@ xpc_any_msg_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
return 0;
}
/*
* Manage channels on a partition basis. There is one of these structures
* for each partition (a partition will never utilize the structure that
* represents itself).
*/
struct xpc_partition_sn2 {
	unsigned long remote_amos_page_pa; /* paddr of partition's amos page */
	int activate_IRQ_nasid;	/* active partition's act/deact nasid */
	int activate_IRQ_phys_cpuid; /* active part's act/deact phys cpuid */

	unsigned long remote_vars_pa;	/* phys addr of partition's vars */
	unsigned long remote_vars_part_pa; /* paddr of partition's vars part */
	u8 remote_vars_version;	/* version# of partition's vars */

	void *local_GPs_base;	/* base address of kmalloc'd space */
	struct xpc_gp_sn2 *local_GPs;	/* local Get/Put values */
	void *remote_GPs_base;	/* base address of kmalloc'd space */
	struct xpc_gp_sn2 *remote_GPs;	/* copy of remote partition's local */
					/* Get/Put values */
	unsigned long remote_GPs_pa; /* phys addr of remote partition's local */
				     /* Get/Put values */

	void *local_openclose_args_base; /* base address of kmalloc'd space */
	struct xpc_openclose_args *local_openclose_args; /* local's args */
	unsigned long remote_openclose_args_pa;	/* phys addr of remote's args */

	int notify_IRQ_nasid;	/* nasid of where to send notify IRQs */
	int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
	char notify_IRQ_owner[8]; /* notify IRQ's owner's name */

	struct amo *remote_chctl_amo_va; /* addr of remote chctl flags' amo */
	struct amo *local_chctl_amo_va;	/* address of chctl flags' amo */

	struct timer_list dropped_notify_IRQ_timer; /* dropped IRQ timer */
};
struct xpc_partition_uv {
unsigned long heartbeat_gpa; /* phys addr of partition's heartbeat */
struct xpc_heartbeat_uv cached_heartbeat; /* cached copy of */
@@ -774,7 +514,6 @@ struct xpc_partition {
 	wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */

 	union {
-		struct xpc_partition_sn2 sn2;
 		struct xpc_partition_uv uv;
 	} sn;
@@ -854,14 +593,6 @@ struct xpc_arch_operations {
#define XPC_P_SS_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
#define XPC_P_SS_TORNDOWN 0x03 /* infrastructure is torndown */
/*
* struct xpc_partition_sn2's dropped notify IRQ timer is set to wait the
* following interval #of seconds before checking for dropped notify IRQs.
* These can occur whenever an IRQ's associated amo write doesn't complete
* until after the IRQ was received.
*/
#define XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL (0.25 * HZ)
/* number of seconds to wait for other partitions to disengage */
#define XPC_DISENGAGE_DEFAULT_TIMELIMIT 90
@@ -888,10 +619,6 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int);
 extern void xpc_create_kthreads(struct xpc_channel *, int, int);
 extern void xpc_disconnect_wait(int);

-/* found in xpc_sn2.c */
-extern int xpc_init_sn2(void);
-extern void xpc_exit_sn2(void);
-
 /* found in xpc_uv.c */
 extern int xpc_init_uv(void);
 extern void xpc_exit_uv(void);

drivers/misc/sgi-xp/xpc_main.c

@@ -279,13 +279,6 @@ xpc_hb_checker(void *ignore)
 		dev_dbg(xpc_part, "checking remote heartbeats\n");
 		xpc_check_remote_hb();
-
-		/*
-		 * On sn2 we need to periodically recheck to ensure no
-		 * IRQ/amo pairs have been missed.
-		 */
-		if (is_shub())
-			force_IRQ = 1;
 	}
/* check for outstanding IRQs */
@@ -1050,9 +1043,7 @@ xpc_do_exit(enum xp_retval reason)
 	xpc_teardown_partitions();

-	if (is_shub())
-		xpc_exit_sn2();
-	else if (is_uv())
+	if (is_uv())
 		xpc_exit_uv();
 }
@@ -1235,21 +1226,7 @@ xpc_init(void)
 	dev_set_name(xpc_part, "part");
 	dev_set_name(xpc_chan, "chan");

-	if (is_shub()) {
-		/*
-		 * The ia64-sn2 architecture supports at most 64 partitions.
-		 * And the inability to unregister remote amos restricts us
-		 * further to only support exactly 64 partitions on this
-		 * architecture, no less.
-		 */
-		if (xp_max_npartitions != 64) {
-			dev_err(xpc_part, "max #of partitions not set to 64\n");
-			ret = -EINVAL;
-		} else {
-			ret = xpc_init_sn2();
-		}
-	} else if (is_uv()) {
+	if (is_uv()) {
 		ret = xpc_init_uv();
 	} else {
@@ -1335,9 +1312,7 @@ out_2:
 	xpc_teardown_partitions();
 out_1:
-	if (is_shub())
-		xpc_exit_sn2();
-	else if (is_uv())
+	if (is_uv())
 		xpc_exit_uv();
 	return ret;
 }

drivers/misc/sgi-xp/xpc_partition.c

@@ -93,10 +93,6 @@ xpc_get_rsvd_page_pa(int nasid)
 		if (ret != xpNeedMoreInfo)
 			break;

-		/* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
-		if (is_shub())
-			len = L1_CACHE_ALIGN(len);

 		if (len > buf_len) {
 			kfree(buf_base);
 			buf_len = L1_CACHE_ALIGN(len);
@@ -452,7 +448,6 @@ xpc_discovery(void)
 		case 32:
 			max_regions *= 2;
 			region_size = 16;
-			DBUG_ON(!is_shub2());
 		}
 	}

drivers/misc/sgi-xp/xpc_sn2.c (diff suppressed because it is too large)

drivers/misc/sgi-xp/xpc_uv.c

@@ -27,7 +27,7 @@
 #if defined CONFIG_X86_64
 #include <asm/uv/bios.h>
 #include <asm/uv/uv_irq.h>
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
 #endif
@@ -35,7 +35,7 @@
#include "../sgi-gru/grukservices.h"
#include "xpc.h"
#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#if defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
__u64 vector : 8,
delivery_mode : 3,
@@ -48,6 +48,8 @@ struct uv_IO_APIC_route_entry {
 		__reserved_2	: 15,
 		dest		: 32;
 };
+
+#define sn_partition_id 0
 #endif
static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
@@ -119,7 +121,7 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
 	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
 	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
 		mq->irq = SGI_XPC_ACTIVATE;
 	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
@@ -142,7 +144,7 @@ xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
 #if defined CONFIG_X86_64
 	uv_teardown_irq(mq->irq);

-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
 	int mmr_pnode;
 	unsigned long mmr_value;
@@ -160,7 +162,7 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
 {
 	int ret;

-#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#if defined CONFIG_IA64_SGI_UV
 	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

 	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
@@ -195,7 +197,7 @@ xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
 #if defined CONFIG_X86_64
 	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
 	BUG_ON(ret != BIOS_STATUS_SUCCESS);
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
 	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
 	BUG_ON(ret != SALRET_OK);
 #else
@@ -794,7 +796,7 @@ xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
 	else
 		ret = xpBiosError;

-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
 	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
 	if (status == SALRET_OK)
 		ret = xpSuccess;

drivers/misc/sgi-xp/xpnet.c

@@ -515,7 +515,7 @@ xpnet_init(void)
 {
 	int result;

-	if (!is_shub() && !is_uv())
+	if (!is_uv())
 		return -ENODEV;

 	dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);