Merge branch 'iommu/fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into x86/urgent
@@ -2012,18 +2012,9 @@ config SCx200HR_TIMER
 	  processor goes idle (as is done by the scheduler). The
 	  other workaround is idle=poll boot option.
 
-config GEODE_MFGPT_TIMER
-	def_bool y
-	prompt "Geode Multi-Function General Purpose Timer (MFGPT) events"
-	depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
-	---help---
-	  This driver provides a clock event source based on the MFGPT
-	  timer(s) in the CS5535 and CS5536 companion chip for the geode.
-	  MFGPTs have a better resolution and max interval than the
-	  generic PIT, and are suitable for use as high-res timers.
-
 config OLPC
 	bool "One Laptop Per Child support"
+	select GPIOLIB
 	default n
 	---help---
 	  Add support for detecting the unique features of the OLPC

@@ -187,8 +187,8 @@ config HAVE_MMIOTRACE_SUPPORT
 	def_bool y
 
 config X86_DECODER_SELFTEST
-	bool "x86 instruction decoder selftest"
-	depends on DEBUG_KERNEL
+	bool "x86 instruction decoder selftest"
+	depends on DEBUG_KERNEL && KPROBES
 	---help---
 	 Perform x86 instruction decoder selftests at build time.
 	 This option is useful for checking the sanity of x86 instruction

@@ -16,7 +16,7 @@
  */
 
 #include <asm/segment.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
 #include <asm/boot.h>
 #include <asm/e820.h>
 #include <asm/page_types.h>

@@ -13,8 +13,8 @@
  */
 
 #include "boot.h"
-#include <linux/utsrelease.h>
-#include <linux/compile.h>
+#include <generated/utsrelease.h>
+#include <generated/compile.h>
 
 const char kernel_version[] =
 	UTS_RELEASE " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") "

@@ -696,7 +696,7 @@ ia32_sys_call_table:
 	.quad quiet_ni_syscall		/* streams2 */
 	.quad stub32_vfork		/* 190 */
 	.quad compat_sys_getrlimit
-	.quad sys32_mmap2
+	.quad sys_mmap_pgoff
 	.quad sys32_truncate64
 	.quad sys32_ftruncate64
 	.quad sys32_stat64		/* 195 */

@@ -155,9 +155,6 @@ struct mmap_arg_struct {
 asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
 {
 	struct mmap_arg_struct a;
-	struct file *file = NULL;
-	unsigned long retval;
-	struct mm_struct *mm ;
 
 	if (copy_from_user(&a, arg, sizeof(a)))
 		return -EFAULT;
@@ -165,22 +162,8 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
 	if (a.offset & ~PAGE_MASK)
 		return -EINVAL;
 
-	if (!(a.flags & MAP_ANONYMOUS)) {
-		file = fget(a.fd);
-		if (!file)
-			return -EBADF;
-	}
-
-	mm = current->mm;
-	down_write(&mm->mmap_sem);
-	retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags,
+	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
 			      a.offset>>PAGE_SHIFT);
-	if (file)
-		fput(file);
-
-	up_write(&mm->mmap_sem);
-
-	return retval;
 }
 
 asmlinkage long sys32_mprotect(unsigned long start, size_t len,

@@ -483,30 +466,6 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
 	return ret;
 }
 
-asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
-			    unsigned long prot, unsigned long flags,
-			    unsigned long fd, unsigned long pgoff)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long error;
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			return -EBADF;
-	}
-
-	down_write(&mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&mm->mmap_sem);
-
-	if (file)
-		fput(file);
-	return error;
-}
-
 asmlinkage long sys32_olduname(struct oldold_utsname __user *name)
 {
 	char *arch = "x86_64";

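The two sys_ia32.c hunks above strip the open-coded mmap plumbing (fget(), mmap_sem, do_mmap_pgoff(), fput()) out of the ia32 compat layer: sys32_mmap() now forwards to the common sys_mmap_pgoff() entry point, which does the file lookup and locking itself, and sys32_mmap2() disappears entirely (the syscall table slot now points straight at sys_mmap_pgoff, per the ia32entry.S hunk). A condensed sketch of the resulting wrapper, assembled from the hunks above rather than copied verbatim from the final file:

    asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
    {
    	struct mmap_arg_struct a;

    	if (copy_from_user(&a, arg, sizeof(a)))
    		return -EFAULT;
    	if (a.offset & ~PAGE_MASK)
    		return -EINVAL;

    	/* sys_mmap_pgoff() takes care of fget()/mmap_sem/fput() itself */
    	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
    			      a.offset >> PAGE_SHIFT);
    }
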
arch/x86/include/asm/asm-offsets.h (new file, 1 line)
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>

@@ -67,7 +67,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	if (!dev->dma_mask)
 		return 0;
 
-	return addr + size <= *dev->dma_mask;
+	return addr + size - 1 <= *dev->dma_mask;
 }
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)

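The one-character change above is a fencepost fix: a DMA region [addr, addr + size - 1] is usable exactly when its *last* byte is within the device mask, but the old test compared the first byte *past* the end, wrongly rejecting buffers that end at the mask boundary (and for a full 64-bit mask, addr + size can even wrap to zero). A stand-alone sketch of the boundary case, not part of the commit:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-alone checks mirroring the two versions above. */
    static int capable_old(uint64_t addr, uint64_t size, uint64_t mask)
    {
    	return addr + size <= mask;	/* off by one: tests one past the end */
    }

    static int capable_new(uint64_t addr, uint64_t size, uint64_t mask)
    {
    	return addr + size - 1 <= mask;	/* last byte must be addressable */
    }

    int main(void)
    {
    	uint64_t mask = 0xffffffffULL;	/* 32-bit DMA mask */
    	/* 16-byte buffer whose last byte sits exactly at the boundary. */
    	printf("old: %d new: %d\n",
    	       capable_old(0xfffffff0ULL, 16, mask),	/* 0: wrongly rejected */
    	       capable_new(0xfffffff0ULL, 16, mask));	/* 1: accepted */
    	return 0;
    }
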
@@ -239,7 +239,6 @@ extern int force_personality32;
 #endif /* !CONFIG_X86_32 */
 
 #define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
 #define ELF_EXEC_PAGESIZE	4096
 
 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical

@@ -12,160 +12,7 @@
 
 #include <asm/processor.h>
 #include <linux/io.h>
 
-/* Generic southbridge functions */
-
-#define GEODE_DEV_PMS 0
-#define GEODE_DEV_ACPI 1
-#define GEODE_DEV_GPIO 2
-#define GEODE_DEV_MFGPT 3
-
-extern int geode_get_dev_base(unsigned int dev);
-
-/* Useful macros */
-#define geode_pms_base()	geode_get_dev_base(GEODE_DEV_PMS)
-#define geode_acpi_base()	geode_get_dev_base(GEODE_DEV_ACPI)
-#define geode_gpio_base()	geode_get_dev_base(GEODE_DEV_GPIO)
-#define geode_mfgpt_base()	geode_get_dev_base(GEODE_DEV_MFGPT)
-
-/* MSRS */
-
-#define MSR_GLIU_P2D_RO0	0x10000029
-
-#define MSR_LX_GLD_MSR_CONFIG	0x48002001
-#define MSR_LX_MSR_PADSEL	0x48002011	/* NOT 0x48000011; the data
-						 * sheet has the wrong value */
-#define MSR_GLCP_SYS_RSTPLL	0x4C000014
-#define MSR_GLCP_DOTPLL		0x4C000015
-
-#define MSR_LBAR_SMB		0x5140000B
-#define MSR_LBAR_GPIO		0x5140000C
-#define MSR_LBAR_MFGPT		0x5140000D
-#define MSR_LBAR_ACPI		0x5140000E
-#define MSR_LBAR_PMS		0x5140000F
-
-#define MSR_DIVIL_SOFT_RESET	0x51400017
-
-#define MSR_PIC_YSEL_LOW	0x51400020
-#define MSR_PIC_YSEL_HIGH	0x51400021
-#define MSR_PIC_ZSEL_LOW	0x51400022
-#define MSR_PIC_ZSEL_HIGH	0x51400023
-#define MSR_PIC_IRQM_LPC	0x51400025
-
-#define MSR_MFGPT_IRQ		0x51400028
-#define MSR_MFGPT_NR		0x51400029
-#define MSR_MFGPT_SETUP		0x5140002B
-
-#define MSR_LX_SPARE_MSR	0x80000011	/* DC-specific */
-
-#define MSR_GX_GLD_MSR_CONFIG	0xC0002001
-#define MSR_GX_MSR_PADSEL	0xC0002011
-
-/* Resource Sizes */
-
-#define LBAR_GPIO_SIZE		0xFF
-#define LBAR_MFGPT_SIZE		0x40
-#define LBAR_ACPI_SIZE		0x40
-#define LBAR_PMS_SIZE		0x80
-
-/* ACPI registers (PMS block) */
-
-/*
- * PM1_EN is only valid when VSA is enabled for 16 bit reads.
- * When VSA is not enabled, *always* read both PM1_STS and PM1_EN
- * with a 32 bit read at offset 0x0
- */
-
-#define PM1_STS			0x00
-#define PM1_EN			0x02
-#define PM1_CNT			0x08
-#define PM2_CNT			0x0C
-#define PM_TMR			0x10
-#define PM_GPE0_STS		0x18
-#define PM_GPE0_EN		0x1C
-
-/* PMC registers (PMS block) */
-
-#define PM_SSD			0x00
-#define PM_SCXA			0x04
-#define PM_SCYA			0x08
-#define PM_OUT_SLPCTL		0x0C
-#define PM_SCLK			0x10
-#define PM_SED			0x1
-#define PM_SCXD			0x18
-#define PM_SCYD			0x1C
-#define PM_IN_SLPCTL		0x20
-#define PM_WKD			0x30
-#define PM_WKXD			0x34
-#define PM_RD			0x38
-#define PM_WKXA			0x3C
-#define PM_FSD			0x40
-#define PM_TSD			0x44
-#define PM_PSD			0x48
-#define PM_NWKD			0x4C
-#define PM_AWKD			0x50
-#define PM_SSC			0x54
-
-/* VSA2 magic values */
-
-#define VSA_VRC_INDEX		0xAC1C
-#define VSA_VRC_DATA		0xAC1E
-#define VSA_VR_UNLOCK		0xFC53	/* unlock virtual register */
-#define VSA_VR_SIGNATURE	0x0003
-#define VSA_VR_MEM_SIZE		0x0200
-#define AMD_VSA_SIG		0x4132	/* signature is ascii 'VSA2' */
-#define GSW_VSA_SIG		0x534d	/* General Software signature */
-/* GPIO */
-
-#define GPIO_OUTPUT_VAL		0x00
-#define GPIO_OUTPUT_ENABLE	0x04
-#define GPIO_OUTPUT_OPEN_DRAIN	0x08
-#define GPIO_OUTPUT_INVERT	0x0C
-#define GPIO_OUTPUT_AUX1	0x10
-#define GPIO_OUTPUT_AUX2	0x14
-#define GPIO_PULL_UP		0x18
-#define GPIO_PULL_DOWN		0x1C
-#define GPIO_INPUT_ENABLE	0x20
-#define GPIO_INPUT_INVERT	0x24
-#define GPIO_INPUT_FILTER	0x28
-#define GPIO_INPUT_EVENT_COUNT	0x2C
-#define GPIO_READ_BACK		0x30
-#define GPIO_INPUT_AUX1		0x34
-#define GPIO_EVENTS_ENABLE	0x38
-#define GPIO_LOCK_ENABLE	0x3C
-#define GPIO_POSITIVE_EDGE_EN	0x40
-#define GPIO_NEGATIVE_EDGE_EN	0x44
-#define GPIO_POSITIVE_EDGE_STS	0x48
-#define GPIO_NEGATIVE_EDGE_STS	0x4C
-
-#define GPIO_MAP_X		0xE0
-#define GPIO_MAP_Y		0xE4
-#define GPIO_MAP_Z		0xE8
-#define GPIO_MAP_W		0xEC
-
-static inline u32 geode_gpio(unsigned int nr)
-{
-	BUG_ON(nr > 28);
-	return 1 << nr;
-}
-
-extern void geode_gpio_set(u32, unsigned int);
-extern void geode_gpio_clear(u32, unsigned int);
-extern int geode_gpio_isset(u32, unsigned int);
-extern void geode_gpio_setup_event(unsigned int, int, int);
-extern void geode_gpio_set_irq(unsigned int, unsigned int);
-
-static inline void geode_gpio_event_irq(unsigned int gpio, int pair)
-{
-	geode_gpio_setup_event(gpio, pair, 0);
-}
-
-static inline void geode_gpio_event_pme(unsigned int gpio, int pair)
-{
-	geode_gpio_setup_event(gpio, pair, 1);
-}
-
-/* Specific geode tests */
+#include <linux/cs5535.h>
 
 static inline int is_geode_gx(void)
 {
@@ -186,68 +33,4 @@ static inline int is_geode(void)
 	return (is_geode_gx() || is_geode_lx());
 }
 
-#ifdef CONFIG_MGEODE_LX
-extern int geode_has_vsa2(void);
-#else
-static inline int geode_has_vsa2(void)
-{
-	return 0;
-}
-#endif
-
-/* MFGPTs */
-
-#define MFGPT_MAX_TIMERS	8
-#define MFGPT_TIMER_ANY		(-1)
-
-#define MFGPT_DOMAIN_WORKING	1
-#define MFGPT_DOMAIN_STANDBY	2
-#define MFGPT_DOMAIN_ANY	(MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
-
-#define MFGPT_CMP1		0
-#define MFGPT_CMP2		1
-
-#define MFGPT_EVENT_IRQ		0
-#define MFGPT_EVENT_NMI		1
-#define MFGPT_EVENT_RESET	3
-
-#define MFGPT_REG_CMP1		0
-#define MFGPT_REG_CMP2		2
-#define MFGPT_REG_COUNTER	4
-#define MFGPT_REG_SETUP		6
-
-#define MFGPT_SETUP_CNTEN	(1 << 15)
-#define MFGPT_SETUP_CMP2	(1 << 14)
-#define MFGPT_SETUP_CMP1	(1 << 13)
-#define MFGPT_SETUP_SETUP	(1 << 12)
-#define MFGPT_SETUP_STOPEN	(1 << 11)
-#define MFGPT_SETUP_EXTEN	(1 << 10)
-#define MFGPT_SETUP_REVEN	(1 << 5)
-#define MFGPT_SETUP_CLKSEL	(1 << 4)
-
-static inline void geode_mfgpt_write(int timer, u16 reg, u16 value)
-{
-	u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
-	outw(value, base + reg + (timer * 8));
-}
-
-static inline u16 geode_mfgpt_read(int timer, u16 reg)
-{
-	u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
-	return inw(base + reg + (timer * 8));
-}
-
-extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable);
-extern int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable);
-extern int geode_mfgpt_alloc_timer(int timer, int domain);
-
-#define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1)
-#define geode_mfgpt_release_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 0)
-
-#ifdef CONFIG_GEODE_MFGPT_TIMER
-extern int __init mfgpt_timer_setup(void);
-#else
-static inline int mfgpt_timer_setup(void) { return 0; }
-#endif
-
 #endif /* _ASM_X86_GEODE_H */

@@ -120,7 +120,7 @@ extern int olpc_ec_mask_unset(uint8_t bits);
 
 /* GPIO assignments */
 
-#define OLPC_GPIO_MIC_AC	geode_gpio(1)
+#define OLPC_GPIO_MIC_AC	1
 #define OLPC_GPIO_DCON_IRQ	geode_gpio(7)
 #define OLPC_GPIO_THRM_ALRM	geode_gpio(10)
 #define OLPC_GPIO_SMB_CLK	geode_gpio(14)

@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+static inline int arch_spin_is_locked(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
 }
 
-static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+static inline int arch_spin_is_contended(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
 						  unsigned long flags)
 {
 	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
 }
 
-static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
 }
 
-static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }

@@ -318,14 +318,14 @@ struct pv_mmu_ops {
 			   phys_addr_t phys, pgprot_t flags);
 };
 
-struct raw_spinlock;
+struct arch_spinlock;
 struct pv_lock_ops {
-	int (*spin_is_locked)(struct raw_spinlock *lock);
-	int (*spin_is_contended)(struct raw_spinlock *lock);
-	void (*spin_lock)(struct raw_spinlock *lock);
-	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
-	int (*spin_trylock)(struct raw_spinlock *lock);
-	void (*spin_unlock)(struct raw_spinlock *lock);
+	int (*spin_is_locked)(struct arch_spinlock *lock);
+	int (*spin_is_contended)(struct arch_spinlock *lock);
+	void (*spin_lock)(struct arch_spinlock *lock);
+	void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+	int (*spin_trylock)(struct arch_spinlock *lock);
+	void (*spin_unlock)(struct arch_spinlock *lock);
 };
 
 /* This contains all the paravirt structures: we get a convenient

@@ -118,11 +118,27 @@ extern int __init pcibios_init(void);
 
 /* pci-mmconfig.c */
 
+/* "PCI MMCONFIG %04x [bus %02x-%02x]" */
+#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2)
+
+struct pci_mmcfg_region {
+	struct list_head list;
+	struct resource res;
+	u64 address;
+	char __iomem *virt;
+	u16 segment;
+	u8 start_bus;
+	u8 end_bus;
+	char name[PCI_MMCFG_RESOURCE_NAME_LEN];
+};
+
 extern int __init pci_mmcfg_arch_init(void);
 extern void __init pci_mmcfg_arch_free(void);
+extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
 
-extern struct acpi_mcfg_allocation *pci_mmcfg_config;
-extern int pci_mmcfg_config_num;
+extern struct list_head pci_mmcfg_list;
+
+#define PCI_MMCFG_BUS_OFFSET(bus) ((bus) << 20)
 
 /*
  * AMD Fam10h CPUs are buggy, and cannot access MMIO config space

@@ -74,31 +74,31 @@ extern void __bad_percpu_size(void);
 
 #define percpu_to_op(op, var, val)		\
 do {						\
-	typedef typeof(var) T__;		\
+	typedef typeof(var) pto_T__;		\
 	if (0) {				\
-		T__ tmp__;			\
-		tmp__ = (val);			\
+		pto_T__ pto_tmp__;		\
+		pto_tmp__ = (val);		\
 	}					\
 	switch (sizeof(var)) {			\
 	case 1:					\
 		asm(op "b %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "qi" ((T__)(val)));	\
+		    : "qi" ((pto_T__)(val)));	\
 		break;				\
 	case 2:					\
 		asm(op "w %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "ri" ((T__)(val)));	\
+		    : "ri" ((pto_T__)(val)));	\
 		break;				\
 	case 4:					\
 		asm(op "l %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "ri" ((T__)(val)));	\
+		    : "ri" ((pto_T__)(val)));	\
 		break;				\
 	case 8:					\
 		asm(op "q %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "re" ((T__)(val)));	\
+		    : "re" ((pto_T__)(val)));	\
 		break;				\
 	default: __bad_percpu_size();		\
 	}					\
@@ -106,31 +106,31 @@ do {						\
 
 #define percpu_from_op(op, var, constraint)	\
 ({						\
-	typeof(var) ret__;			\
+	typeof(var) pfo_ret__;			\
 	switch (sizeof(var)) {			\
 	case 1:					\
 		asm(op "b "__percpu_arg(1)",%0"	\
-		    : "=q" (ret__)		\
+		    : "=q" (pfo_ret__)		\
 		    : constraint);		\
 		break;				\
 	case 2:					\
 		asm(op "w "__percpu_arg(1)",%0"	\
-		    : "=r" (ret__)		\
+		    : "=r" (pfo_ret__)		\
 		    : constraint);		\
 		break;				\
 	case 4:					\
 		asm(op "l "__percpu_arg(1)",%0"	\
-		    : "=r" (ret__)		\
+		    : "=r" (pfo_ret__)		\
 		    : constraint);		\
 		break;				\
 	case 8:					\
 		asm(op "q "__percpu_arg(1)",%0"	\
-		    : "=r" (ret__)		\
+		    : "=r" (pfo_ret__)		\
 		    : constraint);		\
 		break;				\
 	default: __bad_percpu_size();		\
 	}					\
-	ret__;					\
+	pfo_ret__;				\
 })
 
 /*

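In both macros above, the `if (0)` arm is discarded by the compiler but still forces `val` to be assignment-compatible with the per-cpu variable; the renames T__ -> pto_T__ and ret__ -> pfo_ret__ give each macro a private identifier space, so nesting one per-cpu op inside another cannot shadow or capture the other's temporaries. A stand-alone sketch of the type-check idiom (macro name hypothetical; uses GCC's __typeof__):

    #include <stdio.h>

    /* The if (0) arm generates no code but forces 'val' to be
     * assignment-compatible with 'var', like percpu_to_op() above. */
    #define CHECKED_STORE(var, val)			\
    do {						\
    	typedef __typeof__(var) pto_T__;	\
    	if (0) {				\
    		pto_T__ pto_tmp__;		\
    		pto_tmp__ = (val);		\
    	}					\
    	(var) = (val);				\
    } while (0)

    int main(void)
    {
    	int x;
    	CHECKED_STORE(x, 42);		/* fine */
    	/* CHECKED_STORE(x, "str");	   would be diagnosed at compile time */
    	printf("%d\n", x);
    	return 0;
    }
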
@@ -153,6 +153,84 @@ do {						\
 #define percpu_or(var, val)	percpu_to_op("or", per_cpu__##var, val)
 #define percpu_xor(var, val)	percpu_to_op("xor", per_cpu__##var, val)
 
+#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+
+#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+/*
+ * Per cpu atomic 64 bit operations are only available under 64 bit.
+ * 32 bit must fall back to generic operations.
+ */
+#ifdef CONFIG_X86_64
+#define __this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#endif
+
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
 #define x86_test_and_clear_bit_percpu(bit, var)			\
 ({									\

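The block added above backs the generic this_cpu_*() accessors introduced in this merge window with single segment-prefixed (%fs/%gs) instructions, so a read-modify-write such as this_cpu_add() is one instruction and therefore safe against preemption and interrupts on the local CPU without an explicit preempt_disable(). A hedged usage sketch (variable name hypothetical; in this tree the argument still needed the per_cpu_var() wrapper, while later kernels take the bare symbol as written here):

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, hits);	/* hypothetical counter */

    static void count_hit(void)
    {
    	/* compiles to one "add $1, %gs:hits" -- no locking needed */
    	this_cpu_add(hits, 1);
    }

    static unsigned long hits_on_this_cpu(void)
    {
    	return this_cpu_read(hits);
    }
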
@@ -292,6 +292,8 @@ extern void user_enable_block_step(struct task_struct *);
 #define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
 #endif
 
+#define ARCH_HAS_USER_SINGLE_STEP_INFO
+
 struct user_desc;
 extern int do_get_thread_area(struct task_struct *p, int idx,
 			      struct user_desc __user *info);

@@ -58,7 +58,7 @@
 #if (NR_CPUS < 256)
 #define TICKET_SHIFT 8
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
@@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	     : "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp, new;
 
@@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
 		     : "+m" (lock->slock)
@@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 #else
 #define TICKET_SHIFT 16
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
@@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	     : "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp;
 	int new;
@@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
 		     : "+m" (lock->slock)
@@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
 #endif	/* CONFIG_PARAVIRT_SPINLOCKS */
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
@@ -232,7 +232,7 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
 	return (int)(lock)->lock > 0;
 }
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock)
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
 	return (lock)->lock == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
 		     "jns 1f\n"
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		     ::LOCK_PTR_REG (rw) : "memory");
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
 		     "jz 1f\n"
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 	return 0;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
@@ -284,23 +284,23 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 	return 0;
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "addl %1, %0"
 		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 /* The {read|write|spin}_lock() on x86 are full memory barriers. */
 static inline void smp_mb__after_lock(void) { }

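The hunks above only rename the lock types and entry points, but the code they touch is the x86 ticket lock: the low TICKET_SHIFT bits of slock hold the ticket currently being served and the next TICKET_SHIFT bits the next ticket to hand out, so the lock is held exactly when the two fields differ. A user-space sketch of the __ticket_spin_is_locked() test for the NR_CPUS < 256 layout (TICKET_SHIFT == 8), not part of the commit:

    #include <stdio.h>

    #define TICKET_SHIFT 8

    /* Mirrors the kernel's is-locked test: head (low byte) vs tail. */
    static int ticket_is_locked(unsigned int slock)
    {
    	return !!(((slock >> TICKET_SHIFT) ^ slock) &
    		  ((1 << TICKET_SHIFT) - 1));
    }

    int main(void)
    {
    	printf("%d\n", ticket_is_locked(0x0000));	/* 0: head == tail, free */
    	printf("%d\n", ticket_is_locked(0x0100));	/* 1: held, no waiters */
    	printf("%d\n", ticket_is_locked(0x0200));	/* 1: held, one waiter */
    	return 0;
    }
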
@@ -5,16 +5,16 @@
 # error "please don't include this file directly"
 #endif
 
-typedef struct raw_spinlock {
+typedef struct arch_spinlock {
 	unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
 
 #endif /* _ASM_X86_SPINLOCK_TYPES_H */

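This header completes the x86 side of the tree-wide rename in which the architecture layer moved from raw_spinlock_t/__raw_spin_*() to arch_spinlock_t/arch_spin_*(), freeing the raw_ prefix for the new core-level raw_spinlock_t wrapper (see the io_apic.c hunks further down). A hedged sketch of the renamed arch-level API; real code reaches it through spin_lock()/raw_spin_lock(), and direct arch_spin_* use is confined to a few low-level spots such as early-boot paths:

    #include <linux/spinlock.h>

    /* Hypothetical lock, shown only to illustrate the renamed names. */
    static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

    static void demo(void)
    {
    	arch_spin_lock(&demo_lock);
    	/* critical section */
    	arch_spin_unlock(&demo_lock);
    }
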
@@ -30,7 +30,6 @@ struct mmap_arg_struct;
 asmlinkage long sys32_mmap(struct mmap_arg_struct __user *);
 asmlinkage long sys32_mprotect(unsigned long, size_t, unsigned long);
 
-asmlinkage long sys32_pipe(int __user *);
 struct sigaction32;
 struct old_sigaction32;
 asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *,
@@ -57,9 +56,6 @@ asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32);
 asmlinkage long sys32_personality(unsigned long);
 asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
 
-asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long,
-			    unsigned long, unsigned long, unsigned long);
-
 struct oldold_utsname;
 struct old_utsname;
 asmlinkage long sys32_olduname(struct oldold_utsname __user *);

@@ -56,8 +56,6 @@ struct sel_arg_struct;
 struct oldold_utsname;
 struct old_utsname;
 
-asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
-			  unsigned long, unsigned long, unsigned long);
 asmlinkage int old_mmap(struct mmap_arg_struct __user *);
 asmlinkage int old_select(struct sel_arg_struct __user *);
 asmlinkage int sys_ipc(uint, int, int, int, void __user *, long);

@@ -35,11 +35,16 @@
 # endif
 #endif
 
-/* Node not present */
-#define NUMA_NO_NODE	(-1)
+/*
+ * to preserve the visibility of NUMA_NO_NODE definition,
+ * moved to there from here.  May be used independent of
+ * CONFIG_NUMA.
+ */
+#include <linux/numa.h>
 
 #ifdef CONFIG_NUMA
 #include <linux/cpumask.h>
 
 #include <asm/mpspec.h>
 
 #ifdef CONFIG_X86_32

@@ -76,15 +76,6 @@ union partition_info_u {
 	};
 };
 
-union uv_watchlist_u {
-	u64 val;
-	struct {
-		u64 blade	: 16,
-		    size	: 32,
-		    filler	: 16;
-	};
-};
-
 enum uv_memprotect {
 	UV_MEMPROT_RESTRICT_ACCESS,
 	UV_MEMPROT_ALLOW_AMO,
@@ -100,7 +91,7 @@ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
 
 extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
 extern s64 uv_bios_freq_base(u64, u64 *);
-extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int,
+extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int,
 					unsigned long *);
 extern int uv_bios_mq_watchlist_free(int, int);
 extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);

@@ -172,6 +172,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 #define UV_LOCAL_MMR_SIZE		(64UL * 1024 * 1024)
 #define UV_GLOBAL_MMR32_SIZE		(64UL * 1024 * 1024)
 
+#define UV_GLOBAL_GRU_MMR_BASE	0x4000000
+
 #define UV_GLOBAL_MMR32_PNODE_SHIFT	15
 #define UV_GLOBAL_MMR64_PNODE_SHIFT	26
 
@@ -232,6 +234,26 @@ static inline unsigned long uv_gpa(void *v)
 	return uv_soc_phys_ram_to_gpa(__pa(v));
 }
 
+/* Top two bits indicate the requested address is in MMR space. */
+static inline int
+uv_gpa_in_mmr_space(unsigned long gpa)
+{
+	return (gpa >> 62) == 0x3UL;
+}
+
+/* UV global physical address --> socket phys RAM */
+static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
+{
+	unsigned long paddr = gpa & uv_hub_info->gpa_mask;
+	unsigned long remap_base = uv_hub_info->lowmem_remap_base;
+	unsigned long remap_top = uv_hub_info->lowmem_remap_top;
+
+	if (paddr >= remap_base && paddr < remap_base + remap_top)
+		paddr -= remap_base;
+	return paddr;
+}
+
+
 /* gnode -> pnode */
 static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
 {
@@ -307,6 +329,15 @@ static inline unsigned long uv_read_global_mmr64(int pnode,
 	return readq(uv_global_mmr64_address(pnode, offset));
 }
 
+/*
+ * Global MMR space addresses when referenced by the GRU. (GRU does
+ * NOT use socket addressing).
+ */
+static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset)
+{
+	return UV_GLOBAL_GRU_MMR_BASE | offset | (pnode << uv_hub_info->m_val);
+}
+
 /*
  * Access hub local MMRs. Faster than using global space but only local MMRs
  * are accessible.
@@ -434,6 +465,14 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
 	}
 }
 
+static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
+{
+	return (1UL << UVH_IPI_INT_SEND_SHFT) |
+			((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
+			(mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
+			(vector << UVH_IPI_INT_VECTOR_SHFT);
+}
+
 static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
 {
 	unsigned long val;
@@ -442,10 +481,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
 	if (vector == NMI_VECTOR)
 		dmode = dest_NMI;
 
-	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
-			((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
-			(dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
-			(vector << UVH_IPI_INT_VECTOR_SHFT);
+	val = uv_hub_ipi_value(apicid, vector, dmode);
 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 

@@ -37,31 +37,4 @@
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
-enum xen_domain_type {
-	XEN_NATIVE,		/* running on bare hardware    */
-	XEN_PV_DOMAIN,		/* running in a PV domain      */
-	XEN_HVM_DOMAIN,		/* running in a Xen hvm domain */
-};
-
-#ifdef CONFIG_XEN
-extern enum xen_domain_type xen_domain_type;
-#else
-#define xen_domain_type		XEN_NATIVE
-#endif
-
-#define xen_domain()		(xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain()		(xen_domain() &&			\
-				 xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain()	(xen_domain() &&			\
-				 xen_domain_type == XEN_HVM_DOMAIN)
-
-#ifdef CONFIG_XEN_DOM0
-#include <xen/interface/xen.h>
-
-#define xen_initial_domain()	(xen_pv_domain() && \
-				 xen_start_info->flags & SIF_INITDOMAIN)
-#else  /* !CONFIG_XEN_DOM0 */
-#define xen_initial_domain()	(0)
-#endif	/* CONFIG_XEN_DOM0 */
-
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */

@@ -89,7 +89,6 @@ obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_HPET_TIMER)	+= hpet.o
 
 obj-$(CONFIG_K8_NB)		+= k8.o
-obj-$(CONFIG_MGEODE_LX)		+= geode_32.o mfgpt_32.o
 obj-$(CONFIG_DEBUG_RODATA_TEST)	+= test_rodata.o
 obj-$(CONFIG_DEBUG_NX_TEST)	+= test_nx.o
 

@@ -48,7 +48,7 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
 	 * P4, Core and beyond CPUs
 	 */
 	if (c->x86_vendor == X86_VENDOR_INTEL &&
-	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 14)))
+	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
 			flags->bm_control = 0;
 }
 EXPORT_SYMBOL(acpi_processor_power_init_bm_check);

@@ -19,7 +19,7 @@
 
 #include <linux/pci.h>
 #include <linux/gfp.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
 #include <linux/debugfs.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
@@ -1162,7 +1162,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 
 	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
 
-	iommu_area_free(range->bitmap, address, pages);
+	bitmap_clear(range->bitmap, address, pages);
 
 }
 

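The second hunk swaps the IOMMU-private helper iommu_area_free() for the generic bitmap_clear() from the lib/bitmap API (hence the bitops.h -> bitmap.h include change above); both clear `pages` bits starting at `address` in the aperture's allocation bitmap. A user-space sketch of the semantics, with a simple loop standing in for the kernel's word-at-a-time implementation:

    #include <limits.h>
    #include <stdio.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Hypothetical stand-in: clear nbits bits starting at 'start'. */
    static void bitmap_clear_sketch(unsigned long *map, unsigned start,
    				unsigned nbits)
    {
    	for (unsigned i = start; i < start + nbits; i++)
    		map[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
    }

    int main(void)
    {
    	unsigned long map[2] = { ~0UL, ~0UL };	/* all pages allocated */
    	bitmap_clear_sketch(map, 4, 8);		/* free 8 "pages" at 4 */
    	printf("%lx\n", map[0]);		/* bits 4..11 now zero */
    	return 0;
    }
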
@@ -137,6 +137,11 @@ int amd_iommus_present;
 /* IOMMUs have a non-present cache? */
 bool amd_iommu_np_cache __read_mostly;
 
+/*
+ * Set to true if ACPI table parsing and hardware intialization went properly
+ */
+static bool amd_iommu_initialized;
+
 /*
  * List of protection domains - used during resume
  */
@@ -929,6 +934,8 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	}
 	WARN_ON(p != end);
 
+	amd_iommu_initialized = true;
+
 	return 0;
 }
 
@@ -1263,6 +1270,9 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
 		goto free;
 
+	if (!amd_iommu_initialized)
+		goto free;
+
 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
 		goto free;
 
@@ -1345,6 +1355,9 @@ void __init amd_iommu_detect(void)
 		iommu_detected = 1;
 		amd_iommu_detected = 1;
 		x86_init.iommu.iommu_init = amd_iommu_init;
+
+		/* Make sure ACS will be enabled */
+		pci_request_acs();
 	}
 }
 

@@ -2432,7 +2432,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 			continue;
 
 		cfg = irq_cfg(irq);
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 
 		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 			goto unlock;
@@ -2451,7 +2451,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 		}
 		__get_cpu_var(vector_irq)[vector] = -1;
 unlock:
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 	}
 
 	irq_exit();

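These io_apic.c hunks are the flip side of the arch_spinlock rename: irq_desc->lock became a core-level raw_spinlock_t, a lock that always busy-waits and is never turned into a sleeping lock by the real-time preemption patches, which is what low-level interrupt plumbing requires. A hedged kernel-style sketch of the raw API (lock name hypothetical):

    #include <linux/spinlock.h>

    /* A raw_spinlock_t spins even on PREEMPT_RT, so it suits contexts
     * that must never sleep, such as IRQ descriptor handling. */
    static DEFINE_RAW_SPINLOCK(my_hw_lock);

    static void poke_hardware(void)
    {
    	unsigned long flags;

    	raw_spin_lock_irqsave(&my_hw_lock, flags);
    	/* ... touch device registers ... */
    	raw_spin_unlock_irqrestore(&my_hw_lock, flags);
    }
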
@@ -361,7 +361,7 @@ void stop_apic_nmi_watchdog(void *unused)
  */
 
 static DEFINE_PER_CPU(unsigned, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
 static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		 * Ayiee, looks like this CPU is stuck ...
 		 * wait a few IRQs (5 seconds) before doing the oops ...
 		 */
-		local_inc(&__get_cpu_var(alert_counter));
-		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+		__this_cpu_inc(per_cpu_var(alert_counter));
+		if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
 			/*
 			 * die_nmi will return ONLY if NOTIFY_STOP happens..
 			 */
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 				regs, panic_on_timeout);
 	} else {
 		__get_cpu_var(last_irq_sum) = sum;
-		local_set(&__get_cpu_var(alert_counter), 0);
+		__this_cpu_write(per_cpu_var(alert_counter), 0);
 	}
 
 	/* see if the nmi watchdog went off */

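This conversion uses the __this_cpu_*() operations added in the percpu.h hunk earlier: because each op compiles to a single per-cpu-addressed instruction on x86, a plain per-cpu `long` gives the same same-CPU atomicity that the local_t wrapper provided, with less boilerplate. A hedged sketch of the pattern (counter name hypothetical; note this tree still spelled the argument per_cpu_var(demo_counter), as in the hunk above, while later kernels take the bare symbol):

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(long, demo_counter);

    static void demo_tick(void)
    {
    	__this_cpu_inc(demo_counter);		/* one incq %gs:... */
    	if (__this_cpu_read(demo_counter) == 100)
    		__this_cpu_write(demo_counter, 0);
    }
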
@@ -101,21 +101,17 @@ s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
 }
 
 int
-uv_bios_mq_watchlist_alloc(int blade, unsigned long addr, unsigned int mq_size,
+uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
 			   unsigned long *intr_mmr_offset)
 {
-	union uv_watchlist_u size_blade;
 	u64 watchlist;
 	s64 ret;
 
-	size_blade.size = mq_size;
-	size_blade.blade = blade;
-
 	/*
 	 * bios returns watchlist number or negative error number.
 	 */
 	ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
-			size_blade.val, (u64)intr_mmr_offset,
+			mq_size, (u64)intr_mmr_offset,
 			(u64)&watchlist, 0);
 	if (ret < BIOS_STATUS_SUCCESS)
 		return ret;

@@ -1095,7 +1095,7 @@ static void clear_all_debug_regs(void)
 
 void __cpuinit cpu_init(void)
 {
-	struct orig_ist *orig_ist;
+	struct orig_ist *oist;
 	struct task_struct *me;
 	struct tss_struct *t;
 	unsigned long v;
@@ -1104,7 +1104,7 @@ void __cpuinit cpu_init(void)
 
 	cpu = stack_smp_processor_id();
 	t = &per_cpu(init_tss, cpu);
-	orig_ist = &per_cpu(orig_ist, cpu);
+	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
 	if (cpu != 0 && percpu_read(node_number) == 0 &&
@@ -1145,12 +1145,12 @@ void __cpuinit cpu_init(void)
 	/*
 	 * set up and load the per-CPU TSS
 	 */
-	if (!orig_ist->ist[0]) {
+	if (!oist->ist[0]) {
 		char *estacks = per_cpu(exception_stacks, cpu);
 
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
 			estacks += exception_stack_sizes[v];
-			orig_ist->ist[v] = t->x86_tss.ist[v] =
+			oist->ist[v] = t->x86_tss.ist[v] =
 				(unsigned long)estacks;
 		}
 	}

@@ -30,9 +30,9 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 
-static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
-static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
-static DEFINE_PER_CPU(int, cpu_priv_count);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
+static DEFINE_PER_CPU(int, cpud_priv_count);
 
 static DEFINE_MUTEX(cpu_debug_lock);
 
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 
 	/* Already intialized */
 	if (file == CPU_INDEX_BIT)
-		if (per_cpu(cpu_arr[type].init, cpu))
+		if (per_cpu(cpud_arr[type].init, cpu))
 			return 0;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 	priv->reg = reg;
 	priv->file = file;
 	mutex_lock(&cpu_debug_lock);
-	per_cpu(priv_arr[type], cpu) = priv;
-	per_cpu(cpu_priv_count, cpu)++;
+	per_cpu(cpud_priv_arr[type], cpu) = priv;
+	per_cpu(cpud_priv_count, cpu)++;
 	mutex_unlock(&cpu_debug_lock);
 
 	if (file)
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 				dentry, (void *)priv, &cpu_fops);
 	else {
 		debugfs_create_file(cpu_base[type].name, S_IRUGO,
-				    per_cpu(cpu_arr[type].dentry, cpu),
+				    per_cpu(cpud_arr[type].dentry, cpu),
 				    (void *)priv, &cpu_fops);
 		mutex_lock(&cpu_debug_lock);
-		per_cpu(cpu_arr[type].init, cpu) = 1;
+		per_cpu(cpud_arr[type].init, cpu) = 1;
 		mutex_unlock(&cpu_debug_lock);
 	}
 
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
 		if (!is_typeflag_valid(cpu, cpu_base[type].flag))
 			continue;
 		cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
-		per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+		per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
 
 		if (type < CPU_TSS_BIT)
 			err = cpu_init_msr(cpu, type, cpu_dentry);
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
 		err = cpu_init_allreg(cpu, cpu_dentry);
 
 		pr_info("cpu%d(%d) debug files %d\n",
-			cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
-		if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+			cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
+		if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
 			pr_err("Register files count %d exceeds limit %d\n",
-			       per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
-			per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+			       per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
+			per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
 			err = -ENFILE;
 		}
 		if (err)
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
 		debugfs_remove_recursive(cpu_debugfs_dir);
 
 	for (cpu = 0; cpu <  nr_cpu_ids; cpu++)
-		for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
-			kfree(per_cpu(priv_arr[i], cpu));
+		for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
+			kfree(per_cpu(cpud_priv_arr[i], cpu));
 }
 
 module_init(cpu_debug_init);

@@ -68,9 +68,9 @@ struct acpi_cpufreq_data {
 	unsigned int cpu_feature;
 };
 
-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
+static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
 
-static DEFINE_PER_CPU(struct aperfmperf, old_perf);
+static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
 
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
@@ -214,14 +214,14 @@ static u32 get_cur_val(const struct cpumask *mask)
 	if (unlikely(cpumask_empty(mask)))
 		return 0;
 
-	switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
+	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
 		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
-		perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
+		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
 		cmd.addr.io.port = perf->control_register.address;
 		cmd.addr.io.bit_width = perf->control_register.bit_width;
 		break;
@@ -268,8 +268,8 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
 		return 0;
 
-	ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
-	per_cpu(old_perf, cpu) = perf;
+	ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
+	per_cpu(acfreq_old_perf, cpu) = perf;
 
 	retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
 
@@ -278,7 +278,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
 	unsigned int freq;
 	unsigned int cached_freq;
 
@@ -322,7 +322,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 			       unsigned int target_freq, unsigned int relation)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 	struct acpi_processor_performance *perf;
 	struct cpufreq_freqs freqs;
 	struct drv_cmd cmd;
@@ -416,7 +416,7 @@ out:
 
 static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
 	dprintk("acpi_cpufreq_verify\n");
 
@@ -574,7 +574,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		return -ENOMEM;
 
 	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
-	per_cpu(drv_data, cpu) = data;
+	per_cpu(acfreq_data, cpu) = data;
 
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
 		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
@@ -725,20 +725,20 @@ err_unreg:
 	acpi_processor_unregister_performance(perf, cpu);
 err_free:
 	kfree(data);
-	per_cpu(drv_data, cpu) = NULL;
+	per_cpu(acfreq_data, cpu) = NULL;
 
 	return result;
 }
 
 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
 	dprintk("acpi_cpufreq_cpu_exit\n");
 
 	if (data) {
 		cpufreq_frequency_table_put_attr(policy->cpu);
-		per_cpu(drv_data, policy->cpu) = NULL;
+		per_cpu(acfreq_data, policy->cpu) = NULL;
 		acpi_processor_unregister_performance(data->acpi_data,
 						      policy->cpu);
 		kfree(data);
@@ -749,7 +749,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 
 static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
 	dprintk("acpi_cpufreq_resume\n");
 
@@ -764,14 +764,15 @@ static struct freq_attr *acpi_cpufreq_attr[] = {
 };
 
 static struct cpufreq_driver acpi_cpufreq_driver = {
-	.verify = acpi_cpufreq_verify,
-	.target = acpi_cpufreq_target,
-	.init = acpi_cpufreq_cpu_init,
-	.exit = acpi_cpufreq_cpu_exit,
-	.resume = acpi_cpufreq_resume,
-	.name = "acpi-cpufreq",
-	.owner = THIS_MODULE,
-	.attr = acpi_cpufreq_attr,
+	.verify		= acpi_cpufreq_verify,
+	.target		= acpi_cpufreq_target,
+	.bios_limit	= acpi_processor_get_bios_limit,
+	.init		= acpi_cpufreq_cpu_init,
+	.exit		= acpi_cpufreq_cpu_exit,
+	.resume		= acpi_cpufreq_resume,
+	.name		= "acpi-cpufreq",
+	.owner		= THIS_MODULE,
+	.attr		= acpi_cpufreq_attr,
 };
 
 static int __init acpi_cpufreq_init(void)

@@ -164,7 +164,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
 	}
 
 	/* cpuinfo and default policy values */
-	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+	policy->cpuinfo.transition_latency = 200000;
 	policy->cur = busfreq * max_multiplier;
 
 	result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);

@@ -714,14 +714,17 @@ static struct freq_attr *powernow_table_attr[] = {
 };
 
 static struct cpufreq_driver powernow_driver = {
-	.verify = powernow_verify,
-	.target = powernow_target,
-	.get = powernow_get,
-	.init = powernow_cpu_init,
-	.exit = powernow_cpu_exit,
-	.name = "powernow-k7",
-	.owner = THIS_MODULE,
-	.attr = powernow_table_attr,
+	.verify		= powernow_verify,
+	.target		= powernow_target,
+	.get		= powernow_get,
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+	.bios_limit	= acpi_processor_get_bios_limit,
+#endif
+	.init		= powernow_cpu_init,
+	.exit		= powernow_cpu_exit,
+	.name		= "powernow-k7",
+	.owner		= THIS_MODULE,
+	.attr		= powernow_table_attr,
 };
 
 static int __init powernow_init(void)

@@ -1118,7 +1118,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
static int powernowk8_target(struct cpufreq_policy *pol,
		unsigned targfreq, unsigned relation)
{
	cpumask_t oldmask;
	cpumask_var_t oldmask;
	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
	u32 checkfid;
	u32 checkvid;
@@ -1131,9 +1131,13 @@ static int powernowk8_target(struct cpufreq_policy *pol,
	checkfid = data->currfid;
	checkvid = data->currvid;

	/* only run on specific CPU from here on */
	oldmask = current->cpus_allowed;
	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
	/* only run on specific CPU from here on. */
	/* This is poor form: use a workqueue or smp_call_function_single */
	if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(oldmask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));

	if (smp_processor_id() != pol->cpu) {
		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1193,7 +1197,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
	ret = 0;

err_out:
	set_cpus_allowed_ptr(current, &oldmask);
	set_cpus_allowed_ptr(current, oldmask);
	free_cpumask_var(oldmask);
	return ret;
}

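/*
 * Reviewer sketch, not part of this commit: the hunk above replaces an
 * on-stack cpumask_t copy with a heap-allocated cpumask_var_t, keeping
 * stack usage bounded on large-NR_CPUS builds. A minimal, self-contained
 * user of the same alloc/copy/restore pattern (function name hypothetical):
 */
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/gfp.h>

static int run_pinned_to_cpu(int cpu)
{
	cpumask_var_t saved;
	int ret = 0;

	if (!alloc_cpumask_var(&saved, GFP_KERNEL))
		return -ENOMEM;

	/* remember the caller's affinity, then pin to the target CPU */
	cpumask_copy(saved, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(cpu));

	if (smp_processor_id() != cpu)
		ret = -EIO;

	/* restore the original affinity and free the temporary mask */
	set_cpus_allowed_ptr(current, saved);
	free_cpumask_var(saved);
	return ret;
}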
@@ -1393,14 +1398,15 @@ static struct freq_attr *powernow_k8_attr[] = {
};

static struct cpufreq_driver cpufreq_amd64_driver = {
	.verify		= powernowk8_verify,
	.target		= powernowk8_target,
	.init		= powernowk8_cpu_init,
	.exit		= __devexit_p(powernowk8_cpu_exit),
	.get		= powernowk8_get,
	.name		= "powernow-k8",
	.owner		= THIS_MODULE,
	.attr		= powernow_k8_attr,
	.verify		= powernowk8_verify,
	.target		= powernowk8_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= powernowk8_cpu_init,
	.exit		= __devexit_p(powernowk8_cpu_exit),
	.get		= powernowk8_get,
	.name		= "powernow-k8",
	.owner		= THIS_MODULE,
	.attr		= powernow_k8_attr,
};

/* driver entry point for init */
|
@@ -39,7 +39,7 @@ static struct pci_dev *speedstep_chipset_dev;

/* speedstep_processor
 */
static unsigned int speedstep_processor;
static enum speedstep_processor speedstep_processor;

static u32 pmbase;

|
@@ -34,7 +34,7 @@ static int relaxed_check;
 *                   GET PROCESSOR CORE SPEED IN KHZ                  *
 *********************************************************************/

static unsigned int pentium3_get_frequency(unsigned int processor)
static unsigned int pentium3_get_frequency(enum speedstep_processor processor)
{
	/* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
	struct {
@@ -227,7 +227,7 @@ static unsigned int pentium4_get_frequency(void)


/* Warning: may get called from smp_call_function_single. */
unsigned int speedstep_get_frequency(unsigned int processor)
unsigned int speedstep_get_frequency(enum speedstep_processor processor)
{
	switch (processor) {
	case SPEEDSTEP_CPU_PCORE:
@@ -380,7 +380,7 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
 *                     DETECT SPEEDSTEP SPEEDS                        *
 *********************************************************************/

unsigned int speedstep_get_freqs(unsigned int processor,
unsigned int speedstep_get_freqs(enum speedstep_processor processor,
				  unsigned int *low_speed,
				  unsigned int *high_speed,
				  unsigned int *transition_latency,
|
@@ -11,18 +11,18 @@


/* processors */

#define SPEEDSTEP_CPU_PIII_C_EARLY	0x00000001 /* Coppermine core */
#define SPEEDSTEP_CPU_PIII_C		0x00000002 /* Coppermine core */
#define SPEEDSTEP_CPU_PIII_T		0x00000003 /* Tualatin core */
#define SPEEDSTEP_CPU_P4M		0x00000004 /* P4-M */

enum speedstep_processor {
	SPEEDSTEP_CPU_PIII_C_EARLY = 0x00000001, /* Coppermine core */
	SPEEDSTEP_CPU_PIII_C	   = 0x00000002, /* Coppermine core */
	SPEEDSTEP_CPU_PIII_T	   = 0x00000003, /* Tualatin core */
	SPEEDSTEP_CPU_P4M	   = 0x00000004, /* P4-M */
/* the following processors are not speedstep-capable and are not auto-detected
 * in speedstep_detect_processor(). However, their speed can be detected using
 * the speedstep_get_frequency() call. */
#define SPEEDSTEP_CPU_PM		0xFFFFFF03 /* Pentium M */
#define SPEEDSTEP_CPU_P4D		0xFFFFFF04 /* desktop P4 */
#define SPEEDSTEP_CPU_PCORE		0xFFFFFF05 /* Core */
	SPEEDSTEP_CPU_PM	   = 0xFFFFFF03, /* Pentium M */
	SPEEDSTEP_CPU_P4D	   = 0xFFFFFF04, /* desktop P4 */
	SPEEDSTEP_CPU_PCORE	   = 0xFFFFFF05, /* Core */
};

/* speedstep states -- only two of them */

@@ -31,10 +31,10 @@


/* detect a speedstep-capable processor */
extern unsigned int speedstep_detect_processor (void);
extern enum speedstep_processor speedstep_detect_processor(void);

/* detect the current speed (in khz) of the processor */
extern unsigned int speedstep_get_frequency(unsigned int processor);
extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);


/* detect the low and high speeds of the processor. The callback
@@ -42,7 +42,7 @@ extern unsigned int speedstep_get_frequency(unsigned int processor);
 * SPEEDSTEP_LOW; the second argument is zero so that no
 * cpufreq_notify_transition calls are initiated.
 */
extern unsigned int speedstep_get_freqs(unsigned int processor,
extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
					unsigned int *low_speed,
					unsigned int *high_speed,
					unsigned int *transition_latency,
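/*
 * Reviewer sketch, not part of this commit: converting the SPEEDSTEP_CPU_*
 * #defines into enum speedstep_processor lets the compiler type-check every
 * caller, which is what the prototype changes above rely on. A hypothetical
 * illustration of the pattern (names invented for the example):
 */
enum example_processor { EXAMPLE_CPU_A = 1, EXAMPLE_CPU_B = 2 };

static unsigned int example_get_frequency(enum example_processor processor)
{
	/* switching over an enum lets the compiler warn on unhandled values */
	switch (processor) {
	case EXAMPLE_CPU_A:
		return 100000;
	case EXAMPLE_CPU_B:
		return 200000;
	}
	return 0;
}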
|
@@ -35,7 +35,7 @@ static int smi_cmd;
static unsigned int smi_sig;

/* info about the processor */
static unsigned int speedstep_processor;
static enum speedstep_processor speedstep_processor;

/*
 * There are only two frequency states for each processor. Values
|
@@ -499,8 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -512,7 +512,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)

	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		for_each_cpu(i, c->llc_shared_map) {
			if (!per_cpu(cpuid4_info, i))
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, c->llc_shared_map) {
@@ -536,7 +536,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
		    c->apicid >> index_msb) {
			cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
			if (i != cpu && per_cpu(cpuid4_info, i)) {
			if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
				sibling_leaf =
					CPUID4_INFO_IDX(i, index);
				cpumask_set_cpu(cpu, to_cpumask(
@@ -575,8 +575,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;
	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static int
@@ -615,15 +615,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(cpuid4_info, cpu) = kzalloc(
	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(cpuid4_info, cpu));
		per_cpu(cpuid4_info, cpu) = NULL;
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
@@ -635,7 +635,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
@@ -644,8 +644,8 @@ struct _index_kobject {
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
@@ -864,10 +864,10 @@ static struct kobj_type ktype_percpu_entry = {

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

@@ -883,14 +883,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
		return err;

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(index_kobject, cpu) = kzalloc(
	per_cpu(ici_index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;
@@ -914,7 +914,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
@@ -928,12 +928,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(cache_kobject, cpu),
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(cache_kobject, cpu));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
@@ -941,7 +941,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}

@@ -950,7 +950,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(cpuid4_info, cpu) == NULL)
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
@@ -958,7 +958,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

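/*
 * Reviewer sketch, not part of this commit: the ici_ prefix added above
 * exists because per-CPU variable names live in a single global namespace,
 * so a generic name like cpuid4_info can collide with symbols elsewhere in
 * the kernel. The declare/access pattern itself is unchanged (names here
 * are hypothetical):
 */
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct kobject *, example_cache_kobject);

static void example_store(int cpu, struct kobject *kobj)
{
	/* per_cpu() selects the named CPU's instance of the variable */
	per_cpu(example_cache_kobject, cpu) = kobj;
}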
|
@@ -4,6 +4,7 @@
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>

#define LINE_SIZE 80
@@ -133,8 +134,7 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
		return -EINVAL;

	base = simple_strtoull(line + 5, &ptr, 0);
	while (isspace(*ptr))
		ptr++;
	ptr = skip_spaces(ptr);

	if (strncmp(ptr, "size=", 5))
		return -EINVAL;
@@ -142,14 +142,11 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
	size = simple_strtoull(ptr + 5, &ptr, 0);
	if ((base & 0xfff) || (size & 0xfff))
		return -EINVAL;
	while (isspace(*ptr))
		ptr++;
	ptr = skip_spaces(ptr);

	if (strncmp(ptr, "type=", 5))
		return -EINVAL;
	ptr += 5;
	while (isspace(*ptr))
		ptr++;
	ptr = skip_spaces(ptr + 5);

	for (i = 0; i < MTRR_NUM_TYPES; ++i) {
		if (strcmp(ptr, mtrr_strings[i]))
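/*
 * Reviewer sketch, not part of this commit: skip_spaces() from
 * <linux/string.h> replaces the open-coded isspace() loops above and
 * returns a pointer to the first non-blank character. A minimal parse
 * step in the same style as mtrr_write() (input contents hypothetical):
 */
#include <linux/string.h>

static const char *example_parse(const char *line)
{
	const char *p = skip_spaces(line);	/* was: while (isspace(*p)) p++; */

	if (strncmp(p, "size=", 5))
		return NULL;
	return skip_spaces(p + 5);		/* skip the keyword, then blanks */
}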
|
@@ -1632,6 +1632,7 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)

	data.period	= event->hw.last_period;
	data.addr	= 0;
	data.raw	= NULL;
	regs.ip		= 0;

	/*
@@ -1749,6 +1750,7 @@ static int p6_pmu_handle_irq(struct pt_regs *regs)
	u64 val;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

@@ -1794,6 +1796,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
	u64 ack, status;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

@@ -1857,6 +1860,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
	u64 val;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

@@ -2062,12 +2066,6 @@ static __init int p6_pmu_init(void)

	x86_pmu = p6_pmu;

	if (!cpu_has_apic) {
		pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
		pr_info("no hardware sampling interrupt available.\n");
		x86_pmu.apic = 0;
	}

	return 0;
}

@@ -2159,6 +2157,16 @@ static __init int amd_pmu_init(void)
	return 0;
}

static void __init pmu_check_apic(void)
{
	if (cpu_has_apic)
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");
}

void __init init_hw_perf_events(void)
{
	int err;
@@ -2180,6 +2188,8 @@ void __init init_hw_perf_events(void)
		return;
	}

	pmu_check_apic();

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
@@ -2287,7 +2297,7 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)

static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
static DEFINE_PER_CPU(int, in_nmi_frame);
static DEFINE_PER_CPU(int, in_ignored_frame);


static void
@@ -2303,8 +2313,9 @@ static void backtrace_warning(void *data, char *msg)

static int backtrace_stack(void *data, char *name)
{
	per_cpu(in_nmi_frame, smp_processor_id()) =
			x86_is_stack_id(NMI_STACK, name);
	per_cpu(in_ignored_frame, smp_processor_id()) =
			x86_is_stack_id(NMI_STACK, name) ||
			x86_is_stack_id(DEBUG_STACK, name);

	return 0;
}
@@ -2313,7 +2324,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (per_cpu(in_nmi_frame, smp_processor_id()))
	if (per_cpu(in_ignored_frame, smp_processor_id()))
		return;

	if (reliable)
|
@@ -265,13 +265,13 @@ struct ds_context {
	int			cpu;
};

static DEFINE_PER_CPU(struct ds_context *, cpu_context);
static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);


static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
{
	struct ds_context **p_context =
		(task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu));
		(task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
	struct ds_context *context = NULL;
	struct ds_context *new_context = NULL;

|
@@ -188,7 +188,7 @@ void dump_stack(void)
}
EXPORT_SYMBOL(dump_stack);

static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

@@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void)
	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!__raw_spin_trylock(&die_lock)) {
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			__raw_spin_lock(&die_lock);
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
@@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		__raw_spin_unlock(&die_lock);
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

|
@@ -103,6 +103,35 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
	return NULL;
}

static inline int
in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
	     unsigned long *irq_stack_end)
{
	return (stack >= irq_stack && stack < irq_stack_end);
}

/*
 * We are returning from the irq stack and go to the previous one.
 * If the previous stack is also in the irq stack, then bp in the first
 * frame of the irq stack points to the previous, interrupted one.
 * Otherwise we have another level of indirection: We first save
 * the bp of the previous stack, then we switch the stack to the irq one
 * and save a new bp that links to the previous one.
 * (See save_args())
 */
static inline unsigned long
fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
		  unsigned long *irq_stack, unsigned long *irq_stack_end)
{
#ifdef CONFIG_FRAME_POINTER
	struct stack_frame *frame = (struct stack_frame *)bp;

	if (!in_irq_stack(stack, irq_stack, irq_stack_end))
		return (unsigned long)frame->next_frame;
#endif
	return bp;
}

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
@@ -175,7 +204,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
			irq_stack = irq_stack_end -
				(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);

			if (stack >= irq_stack && stack < irq_stack_end) {
			if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = print_context_stack(tinfo, stack, bp,
@@ -186,6 +215,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irq_stack_end[-1]);
				bp = fixup_bp_irq_link(bp, stack, irq_stack,
						       irq_stack_end);
				irq_stack_end = NULL;
				ops->stack(data, "EOI");
				continue;
|
@@ -1076,10 +1076,10 @@ ENTRY(\sym)
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	PER_CPU(init_tss, %rbp)
	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
	PER_CPU(init_tss, %r12)
	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
	call \do_sym
	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
|
@@ -1,196 +0,0 @@
/*
 * AMD Geode southbridge support code
 * Copyright (C) 2006, Advanced Micro Devices, Inc.
 * Copyright (C) 2007, Andres Salomon <dilinger@debian.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <asm/msr.h>
#include <asm/geode.h>

static struct {
	char *name;
	u32 msr;
	int size;
	u32 base;
} lbars[] = {
	{ "geode-pms",   MSR_LBAR_PMS, LBAR_PMS_SIZE, 0 },
	{ "geode-acpi",  MSR_LBAR_ACPI, LBAR_ACPI_SIZE, 0 },
	{ "geode-gpio",  MSR_LBAR_GPIO, LBAR_GPIO_SIZE, 0 },
	{ "geode-mfgpt", MSR_LBAR_MFGPT, LBAR_MFGPT_SIZE, 0 }
};

static void __init init_lbars(void)
{
	u32 lo, hi;
	int i;

	for (i = 0; i < ARRAY_SIZE(lbars); i++) {
		rdmsr(lbars[i].msr, lo, hi);
		if (hi & 0x01)
			lbars[i].base = lo & 0x0000ffff;

		if (lbars[i].base == 0)
			printk(KERN_ERR "geode: Couldn't initialize '%s'\n",
					lbars[i].name);
	}
}

int geode_get_dev_base(unsigned int dev)
{
	BUG_ON(dev >= ARRAY_SIZE(lbars));
	return lbars[dev].base;
}
EXPORT_SYMBOL_GPL(geode_get_dev_base);

/* === GPIO API === */

void geode_gpio_set(u32 gpio, unsigned int reg)
{
	u32 base = geode_get_dev_base(GEODE_DEV_GPIO);

	if (!base)
		return;

	/* low bank register */
	if (gpio & 0xFFFF)
		outl(gpio & 0xFFFF, base + reg);
	/* high bank register */
	gpio >>= 16;
	if (gpio)
		outl(gpio, base + 0x80 + reg);
}
EXPORT_SYMBOL_GPL(geode_gpio_set);

void geode_gpio_clear(u32 gpio, unsigned int reg)
{
	u32 base = geode_get_dev_base(GEODE_DEV_GPIO);

	if (!base)
		return;

	/* low bank register */
	if (gpio & 0xFFFF)
		outl((gpio & 0xFFFF) << 16, base + reg);
	/* high bank register */
	gpio &= (0xFFFF << 16);
	if (gpio)
		outl(gpio, base + 0x80 + reg);
}
EXPORT_SYMBOL_GPL(geode_gpio_clear);

int geode_gpio_isset(u32 gpio, unsigned int reg)
{
	u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
	u32 val;

	if (!base)
		return 0;

	/* low bank register */
	if (gpio & 0xFFFF) {
		val = inl(base + reg) & (gpio & 0xFFFF);
		if ((gpio & 0xFFFF) == val)
			return 1;
	}
	/* high bank register */
	gpio >>= 16;
	if (gpio) {
		val = inl(base + 0x80 + reg) & gpio;
		if (gpio == val)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(geode_gpio_isset);

void geode_gpio_set_irq(unsigned int group, unsigned int irq)
{
	u32 lo, hi;

	if (group > 7 || irq > 15)
		return;

	rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi);

	lo &= ~(0xF << (group * 4));
	lo |= (irq & 0xF) << (group * 4);

	wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
}
EXPORT_SYMBOL_GPL(geode_gpio_set_irq);

void geode_gpio_setup_event(unsigned int gpio, int pair, int pme)
{
	u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
	u32 offset, shift, val;

	if (gpio >= 24)
		offset = GPIO_MAP_W;
	else if (gpio >= 16)
		offset = GPIO_MAP_Z;
	else if (gpio >= 8)
		offset = GPIO_MAP_Y;
	else
		offset = GPIO_MAP_X;

	shift = (gpio % 8) * 4;

	val = inl(base + offset);

	/* Clear whatever was there before */
	val &= ~(0xF << shift);

	/* And set the new value */

	val |= ((pair & 7) << shift);

	/* Set the PME bit if this is a PME event */

	if (pme)
		val |= (1 << (shift + 3));

	outl(val, base + offset);
}
EXPORT_SYMBOL_GPL(geode_gpio_setup_event);

int geode_has_vsa2(void)
{
	static int has_vsa2 = -1;

	if (has_vsa2 == -1) {
		u16 val;

		/*
		 * The VSA has virtual registers that we can query for a
		 * signature.
		 */
		outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
		outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);

		val = inw(VSA_VRC_DATA);
		has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
	}

	return has_vsa2;
}
EXPORT_SYMBOL_GPL(geode_has_vsa2);

static int __init geode_southbridge_init(void)
{
	if (!is_geode())
		return -ENODEV;

	init_lbars();
	(void) mfgpt_timer_setup();
	return 0;
}

postcore_initcall(geode_southbridge_init);
|
@@ -362,8 +362,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
		return ret;
	}

	if (bp->callback)
		ret = arch_store_info(bp);
	ret = arch_store_info(bp);

	if (ret < 0)
		return ret;
@@ -519,7 +518,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
			break;
		}

		(bp->callback)(bp, args->regs);
		perf_bp_event(bp, args->regs);

		rcu_read_unlock();
	}
|
@@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p, void *v)
	if (!desc)
		return 0;

	spin_lock_irqsave(&desc->lock, flags);
	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
@@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p, void *v)

	seq_putc(p, '\n');
out:
	spin_unlock_irqrestore(&desc->lock, flags);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

@@ -294,12 +294,12 @@ void fixup_irqs(void)
			continue;

		/* interrupt's are disabled at this point */
		spin_lock(&desc->lock);
		raw_spin_lock(&desc->lock);

		affinity = desc->affinity;
		if (!irq_has_action(irq) ||
		    cpumask_equal(affinity, cpu_online_mask)) {
			spin_unlock(&desc->lock);
			raw_spin_unlock(&desc->lock);
			continue;
		}

@@ -326,7 +326,7 @@ void fixup_irqs(void)
		if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
			desc->chip->unmask(irq);

		spin_unlock(&desc->lock);
		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
@@ -356,10 +356,10 @@ void fixup_irqs(void)
			irq = __get_cpu_var(vector_irq)[vector];

			desc = irq_to_desc(irq);
			spin_lock(&desc->lock);
			raw_spin_lock(&desc->lock);
			if (desc->chip->retrigger)
				desc->chip->retrigger(irq);
			spin_unlock(&desc->lock);
			raw_spin_unlock(&desc->lock);
		}
	}
}
|
@@ -86,9 +86,15 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
	gdb_regs[GDB_DS]	= regs->ds;
	gdb_regs[GDB_ES]	= regs->es;
	gdb_regs[GDB_CS]	= regs->cs;
	gdb_regs[GDB_SS]	= __KERNEL_DS;
	gdb_regs[GDB_FS]	= 0xFFFF;
	gdb_regs[GDB_GS]	= 0xFFFF;
	if (user_mode_vm(regs)) {
		gdb_regs[GDB_SS] = regs->ss;
		gdb_regs[GDB_SP] = regs->sp;
	} else {
		gdb_regs[GDB_SS] = __KERNEL_DS;
		gdb_regs[GDB_SP] = kernel_stack_pointer(regs);
	}
#else
	gdb_regs[GDB_R8]	= regs->r8;
	gdb_regs[GDB_R9]	= regs->r9;
@@ -101,8 +107,8 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
	gdb_regs32[GDB_PS]	= regs->flags;
	gdb_regs32[GDB_CS]	= regs->cs;
	gdb_regs32[GDB_SS]	= regs->ss;
#endif
	gdb_regs[GDB_SP]	= kernel_stack_pointer(regs);
#endif
}

/**
@@ -220,8 +226,7 @@ static void kgdb_correct_hw_break(void)
			dr7 |= ((breakinfo[breakno].len << 2) |
				 breakinfo[breakno].type) <<
			       ((breakno << 2) + 16);
			if (breakno >= 0 && breakno <= 3)
				set_debugreg(breakinfo[breakno].addr, breakno);
			set_debugreg(breakinfo[breakno].addr, breakno);

		} else {
			if ((dr7 & breakbit) && !breakinfo[breakno].enabled) {
@@ -395,7 +400,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
	/* set the trace bit if we're stepping */
	if (remcomInBuffer[0] == 's') {
		linux_regs->flags |= X86_EFLAGS_TF;
		kgdb_single_step = 1;
		atomic_set(&kgdb_cpu_doing_single_step,
			   raw_smp_processor_id());
	}
|
@@ -1,410 +0,0 @@
/*
 * Driver/API for AMD Geode Multi-Function General Purpose Timers (MFGPT)
 *
 * Copyright (C) 2006, Advanced Micro Devices, Inc.
 * Copyright (C) 2007, Andres Salomon <dilinger@debian.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
 */

/*
 * We are using the 32.768kHz input clock - it's the only one that has the
 * ranges we find desirable. The following table lists the suitable
 * divisors and the associated Hz, minimum interval and the maximum interval:
 *
 *  Divisor   Hz      Min Delta (s)  Max Delta (s)
 *  1         32768   .00048828125      2.000
 *  2         16384   .0009765625       4.000
 *  4          8192   .001953125        8.000
 *  8          4096   .00390625        16.000
 *  16         2048   .0078125         32.000
 *  32         1024   .015625          64.000
 *  64          512   .03125          128.000
 *  128         256   .0625           256.000
 *  256         128   .125            512.000
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/geode.h>

#define MFGPT_DEFAULT_IRQ	7

static struct mfgpt_timer_t {
	unsigned int avail:1;
} mfgpt_timers[MFGPT_MAX_TIMERS];

/* Selected from the table above */

#define MFGPT_DIVISOR 16
#define MFGPT_SCALE  4     /* divisor = 2^(scale) */
#define MFGPT_HZ  (32768 / MFGPT_DIVISOR)
#define MFGPT_PERIODIC (MFGPT_HZ / HZ)

/* Allow for disabling of MFGPTs */
static int disable;
static int __init mfgpt_disable(char *s)
{
	disable = 1;
	return 1;
}
__setup("nomfgpt", mfgpt_disable);

/* Reset the MFGPT timers. This is required by some broken BIOSes which already
 * do the same and leave the system in an unstable state. TinyBIOS 0.98 is
 * affected at least (0.99 is OK with MFGPT workaround left to off).
 */
static int __init mfgpt_fix(char *s)
{
	u32 val, dummy;

	/* The following udocumented bit resets the MFGPT timers */
	val = 0xFF; dummy = 0;
	wrmsr(MSR_MFGPT_SETUP, val, dummy);
	return 1;
}
__setup("mfgptfix", mfgpt_fix);

/*
 * Check whether any MFGPTs are available for the kernel to use. In most
 * cases, firmware that uses AMD's VSA code will claim all timers during
 * bootup; we certainly don't want to take them if they're already in use.
 * In other cases (such as with VSAless OpenFirmware), the system firmware
 * leaves timers available for us to use.
 */


static int timers = -1;

static void geode_mfgpt_detect(void)
{
	int i;
	u16 val;

	timers = 0;

	if (disable) {
		printk(KERN_INFO "geode-mfgpt: MFGPT support is disabled\n");
		goto done;
	}

	if (!geode_get_dev_base(GEODE_DEV_MFGPT)) {
		printk(KERN_INFO "geode-mfgpt: MFGPT LBAR is not set up\n");
		goto done;
	}

	for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
		val = geode_mfgpt_read(i, MFGPT_REG_SETUP);
		if (!(val & MFGPT_SETUP_SETUP)) {
			mfgpt_timers[i].avail = 1;
			timers++;
		}
	}

done:
	printk(KERN_INFO "geode-mfgpt: %d MFGPT timers available.\n", timers);
}

int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
{
	u32 msr, mask, value, dummy;
	int shift = (cmp == MFGPT_CMP1) ? 0 : 8;

	if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
		return -EIO;

	/*
	 * The register maps for these are described in sections 6.17.1.x of
	 * the AMD Geode CS5536 Companion Device Data Book.
	 */
	switch (event) {
	case MFGPT_EVENT_RESET:
		/*
		 * XXX: According to the docs, we cannot reset timers above
		 * 6; that is, resets for 7 and 8 will be ignored.  Is this
		 * a problem?   -dilinger
		 */
		msr = MSR_MFGPT_NR;
		mask = 1 << (timer + 24);
		break;

	case MFGPT_EVENT_NMI:
		msr = MSR_MFGPT_NR;
		mask = 1 << (timer + shift);
		break;

	case MFGPT_EVENT_IRQ:
		msr = MSR_MFGPT_IRQ;
		mask = 1 << (timer + shift);
		break;

	default:
		return -EIO;
	}

	rdmsr(msr, value, dummy);

	if (enable)
		value |= mask;
	else
		value &= ~mask;

	wrmsr(msr, value, dummy);
	return 0;
}
EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event);

int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable)
{
	u32 zsel, lpc, dummy;
	int shift;

	if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
		return -EIO;

	/*
	 * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
	 * is using the same CMP of the timer's Siamese twin, the IRQ is set to
	 * 2, and we mustn't use nor change it.
	 * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the
	 * IRQ of the 1st. This can only happen if forcing an IRQ, calling this
	 * with *irq==0 is safe. Currently there _are_ no 2 drivers.
	 */
	rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
	shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4;
	if (((zsel >> shift) & 0xF) == 2)
		return -EIO;

	/* Choose IRQ: if none supplied, keep IRQ already set or use default */
	if (!*irq)
		*irq = (zsel >> shift) & 0xF;
	if (!*irq)
		*irq = MFGPT_DEFAULT_IRQ;

	/* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
	if (*irq < 1 || *irq == 2 || *irq > 15)
		return -EIO;
	rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
	if (lpc & (1 << *irq))
		return -EIO;

	/* All chosen and checked - go for it */
	if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
		return -EIO;
	if (enable) {
		zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
		wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
	}

	return 0;
}

static int mfgpt_get(int timer)
{
	mfgpt_timers[timer].avail = 0;
	printk(KERN_INFO "geode-mfgpt: Registered timer %d\n", timer);
	return timer;
}

int geode_mfgpt_alloc_timer(int timer, int domain)
{
	int i;

	if (timers == -1) {
		/* timers haven't been detected yet */
		geode_mfgpt_detect();
	}

	if (!timers)
		return -1;

	if (timer >= MFGPT_MAX_TIMERS)
		return -1;

	if (timer < 0) {
		/* Try to find an available timer */
		for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
			if (mfgpt_timers[i].avail)
				return mfgpt_get(i);

			if (i == 5 && domain == MFGPT_DOMAIN_WORKING)
				break;
		}
	} else {
		/* If they requested a specific timer, try to honor that */
		if (mfgpt_timers[timer].avail)
			return mfgpt_get(timer);
	}

	/* No timers available - too bad */
	return -1;
}
EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer);


#ifdef CONFIG_GEODE_MFGPT_TIMER

/*
 * The MFPGT timers on the CS5536 provide us with suitable timers to use
 * as clock event sources - not as good as a HPET or APIC, but certainly
 * better than the PIT.  This isn't a general purpose MFGPT driver, but
 * a simplified one designed specifically to act as a clock event source.
 * For full details about the MFGPT, please consult the CS5536 data sheet.
 */

#include <linux/clocksource.h>
#include <linux/clockchips.h>

static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
static u16 mfgpt_event_clock;

static int irq;
static int __init mfgpt_setup(char *str)
{
	get_option(&str, &irq);
	return 1;
}
__setup("mfgpt_irq=", mfgpt_setup);

static void mfgpt_disable_timer(u16 clock)
{
	/* avoid races by clearing CMP1 and CMP2 unconditionally */
	geode_mfgpt_write(clock, MFGPT_REG_SETUP, (u16) ~MFGPT_SETUP_CNTEN |
			MFGPT_SETUP_CMP1 | MFGPT_SETUP_CMP2);
}

static int mfgpt_next_event(unsigned long, struct clock_event_device *);
static void mfgpt_set_mode(enum clock_event_mode, struct clock_event_device *);

static struct clock_event_device mfgpt_clockevent = {
	.name = "mfgpt-timer",
	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode = mfgpt_set_mode,
	.set_next_event = mfgpt_next_event,
	.rating = 250,
	.cpumask = cpu_all_mask,
	.shift = 32
};

static void mfgpt_start_timer(u16 delta)
{
	geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_CMP2, (u16) delta);
	geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);

	geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
			  MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
}

static void mfgpt_set_mode(enum clock_event_mode mode,
			   struct clock_event_device *evt)
{
	mfgpt_disable_timer(mfgpt_event_clock);

	if (mode == CLOCK_EVT_MODE_PERIODIC)
		mfgpt_start_timer(MFGPT_PERIODIC);

	mfgpt_tick_mode = mode;
}

static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
{
	mfgpt_start_timer(delta);
	return 0;
}

static irqreturn_t mfgpt_tick(int irq, void *dev_id)
{
	u16 val = geode_mfgpt_read(mfgpt_event_clock, MFGPT_REG_SETUP);

	/* See if the interrupt was for us */
	if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1)))
		return IRQ_NONE;

	/* Turn off the clock (and clear the event) */
	mfgpt_disable_timer(mfgpt_event_clock);

	if (mfgpt_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
		return IRQ_HANDLED;

	/* Clear the counter */
	geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);

	/* Restart the clock in periodic mode */

	if (mfgpt_tick_mode == CLOCK_EVT_MODE_PERIODIC) {
		geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
				  MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
	}

	mfgpt_clockevent.event_handler(&mfgpt_clockevent);
	return IRQ_HANDLED;
}

static struct irqaction mfgptirq  = {
	.handler = mfgpt_tick,
	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
	.name = "mfgpt-timer"
};

int __init mfgpt_timer_setup(void)
{
	int timer, ret;
	u16 val;

	timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
	if (timer < 0) {
		printk(KERN_ERR
		       "mfgpt-timer:  Could not allocate a MFPGT timer\n");
		return -ENODEV;
	}

	mfgpt_event_clock = timer;

	/* Set up the IRQ on the MFGPT side */
	if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, &irq)) {
		printk(KERN_ERR "mfgpt-timer:  Could not set up IRQ %d\n", irq);
		return -EIO;
	}

	/* And register it with the kernel */
	ret = setup_irq(irq, &mfgptirq);

	if (ret) {
		printk(KERN_ERR
		       "mfgpt-timer:  Unable to set up the interrupt.\n");
		goto err;
	}

	/* Set the clock scale and enable the event mode for CMP2 */
	val = MFGPT_SCALE | (3 << 8);

	geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val);

	/* Set up the clock event */
	mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
				       mfgpt_clockevent.shift);
	mfgpt_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
			&mfgpt_clockevent);
	mfgpt_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
			&mfgpt_clockevent);

	printk(KERN_INFO
	       "mfgpt-timer:  Registering MFGPT timer %d as a clock event, using IRQ %d\n",
	       timer, irq);
	clockevents_register_device(&mfgpt_clockevent);

	return 0;

err:
	geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, &irq);
	printk(KERN_ERR
	       "mfgpt-timer:  Unable to set up the MFGPT clock source\n");
	return -EIO;
}

#endif
|
@@ -212,7 +212,7 @@ static int __init olpc_init(void)
	unsigned char *romsig;

	/* The ioremap check is dangerous; limit what we run it on */
	if (!is_geode() || geode_has_vsa2())
	if (!is_geode() || cs5535_has_vsa2())
		return 0;

	spin_lock_init(&ec_lock);
@@ -244,7 +244,7 @@ static int __init olpc_init(void)
			(unsigned char *) &olpc_platform_info.ecver, 1);

	/* check to see if the VSA exists */
	if (geode_has_vsa2())
	if (cs5535_has_vsa2())
		olpc_platform_info.flags |= OLPC_F_VSA;

	printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n",
|
@@ -8,9 +8,9 @@
#include <asm/paravirt.h>

static inline void
default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	__raw_spin_lock(lock);
	arch_spin_lock(lock);
}

struct pv_lock_ops pv_lock_ops = {
|
@@ -31,7 +31,7 @@
#include <linux/string.h>
#include <linux/crash_dump.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -212,7 +212,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,

	spin_lock_irqsave(&tbl->it_lock, flags);

	iommu_area_reserve(tbl->it_map, index, npages);
	bitmap_set(tbl->it_map, index, npages);

	spin_unlock_irqrestore(&tbl->it_lock, flags);
}
@@ -303,7 +303,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,

	spin_lock_irqsave(&tbl->it_lock, flags);

	iommu_area_free(tbl->it_map, entry, npages);
	bitmap_clear(tbl->it_map, entry, npages);

	spin_unlock_irqrestore(&tbl->it_lock, flags);
}
|
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
@@ -126,7 +126,7 @@ static void free_iommu(unsigned long offset, int size)
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
@@ -792,7 +792,7 @@ int __init gart_iommu_init(void)
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);
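/*
 * Reviewer sketch, not part of this commit: iommu_area_reserve() and
 * iommu_area_free() are replaced above by the generic bitmap_set() and
 * bitmap_clear() from <linux/bitmap.h>, which mark or clear a run of bits
 * without any IOMMU-specific wrapper. Minimal usage (sizes hypothetical):
 */
#include <linux/bitmap.h>

#define EXAMPLE_BITS 1024

static DECLARE_BITMAP(example_map, EXAMPLE_BITS);

static void example_reserve_and_release(void)
{
	bitmap_set(example_map, 0, 16);		/* reserve bits 0..15 */
	bitmap_clear(example_map, 0, 16);	/* release them again */
}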
|
@@ -509,14 +509,14 @@ static int genregs_get(struct task_struct *target,
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
		while (count >= sizeof(*k)) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
		while (count >= sizeof(*u)) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
@@ -535,14 +535,14 @@ static int genregs_set(struct task_struct *target,
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
		while (count >= sizeof(*k) && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long  __user *u = ubuf;
		while (count > 0 && !ret) {
		while (count >= sizeof(*u) && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
@@ -555,7 +555,9 @@ static int genregs_set(struct task_struct *target,
	return ret;
}

static void ptrace_triggered(struct perf_event *bp, void *data)
static void ptrace_triggered(struct perf_event *bp, int nmi,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	int i;
	struct thread_struct *thread = &(current->thread);
@@ -593,13 +595,13 @@ static unsigned long ptrace_get_dr7(struct perf_event *bp[])
	return dr7;
}

static struct perf_event *
static int
ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
			 struct task_struct *tsk, int disabled)
{
	int err;
	int gen_len, gen_type;
	DEFINE_BREAKPOINT_ATTR(attr);
	struct perf_event_attr attr;

	/*
	 * We shoud have at least an inactive breakpoint at this
@@ -607,18 +609,18 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
	 * written the address register first
	 */
	if (!bp)
		return ERR_PTR(-EINVAL);
		return -EINVAL;

	err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
	if (err)
		return ERR_PTR(err);
		return err;

	attr = bp->attr;
	attr.bp_len = gen_len;
	attr.bp_type = gen_type;
	attr.disabled = disabled;

	return modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk);
	return modify_user_hw_breakpoint(bp, &attr);
}

/*
@@ -656,28 +658,17 @@ restore:
		if (!second_pass)
			continue;

		thread->ptrace_bps[i] = NULL;
		bp = ptrace_modify_breakpoint(bp, len, type,
		rc = ptrace_modify_breakpoint(bp, len, type,
					      tsk, 1);
		if (IS_ERR(bp)) {
			rc = PTR_ERR(bp);
			thread->ptrace_bps[i] = NULL;
		if (rc)
			break;
		}
		thread->ptrace_bps[i] = bp;
	}
	continue;
	}

	bp = ptrace_modify_breakpoint(bp, len, type, tsk, 0);

	/* Incorrect bp, or we have a bug in bp API */
	if (IS_ERR(bp)) {
		rc = PTR_ERR(bp);
		thread->ptrace_bps[i] = NULL;
	rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
	if (rc)
		break;
	}
	thread->ptrace_bps[i] = bp;
	}
	/*
	 * Make a second pass to free the remaining unused breakpoints
@@ -721,9 +712,10 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
{
	struct perf_event *bp;
	struct thread_struct *t = &tsk->thread;
	DEFINE_BREAKPOINT_ATTR(attr);
	struct perf_event_attr attr;

	if (!t->ptrace_bps[nr]) {
		hw_breakpoint_init(&attr);
		/*
		 * Put stub len and type to register (reserve) an inactive but
		 * correct bp
@@ -734,26 +726,32 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
		attr.disabled = 1;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);

		/*
		 * CHECKME: the previous code returned -EIO if the addr wasn't
		 * a valid task virtual addr. The new one will return -EINVAL in
		 * this case.
		 * -EINVAL may be what we want for in-kernel breakpoints users,
		 * but -EIO looks better for ptrace, since we refuse a register
		 * writing for the user. And anyway this is the previous
		 * behaviour.
		 */
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		t->ptrace_bps[nr] = bp;
	} else {
		int err;

		bp = t->ptrace_bps[nr];
		t->ptrace_bps[nr] = NULL;

		attr = bp->attr;
		attr.bp_addr = addr;
		bp = modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk);
		err = modify_user_hw_breakpoint(bp, &attr);
		if (err)
			return err;
	}
	/*
	 * CHECKME: the previous code returned -EIO if the addr wasn't a
	 * valid task virtual addr. The new one will return -EINVAL in this
	 * case.
	 * -EINVAL may be what we want for in-kernel breakpoints users, but
	 * -EIO looks better for ptrace, since we refuse a register writing
	 * for the user. And anyway this is the previous behaviour.
	 */
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	t->ptrace_bps[nr] = bp;

	return 0;
}
@@ -1460,14 +1458,14 @@ static int genregs32_get(struct task_struct *target,
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
		while (count >= sizeof(*k)) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
		while (count >= sizeof(*u)) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
@@ -1488,14 +1486,14 @@ static int genregs32_set(struct task_struct *target,
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
		while (count >= sizeof(*k) && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
		while (count >= sizeof(*u) && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
@@ -1678,21 +1676,33 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#endif
}

static void fill_sigtrap_info(struct task_struct *tsk,
				struct pt_regs *regs,
				int error_code, int si_code,
				struct siginfo *info)
{
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = si_code;
	info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
}

void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs,
				struct siginfo *info)
{
	fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
					 int error_code, int si_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = si_code;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

	fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}
@@ -1757,29 +1767,22 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)

asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
	bool step;

	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->ax);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter(), so don't do any more now.
	 * syscall_trace_enter().
	 */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		return;

	/*
	 * If we are single-stepping, synthesize a trap to follow the
	 * system call instruction.
	 */
	if (test_thread_flag(TIF_SINGLESTEP) &&
	    tracehook_consider_fatal_signal(current, SIGTRAP))
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
	step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
			!test_thread_flag(TIF_SYSCALL_EMU);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}
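/*
 * Reviewer sketch, not part of this commit: the genregs loops above switch
 * from "while (count > 0)" to "while (count >= sizeof(*k))" so a trailing
 * partial word in the caller-supplied buffer can never be copied past its
 * end. The guard in isolation (names and types hypothetical):
 */
static void example_fill_words(unsigned long *dst, unsigned int count)
{
	unsigned long pos = 0;

	/* only step while a whole word remains; a ragged tail is ignored */
	while (count >= sizeof(*dst)) {
		*dst++ = pos;
		count -= sizeof(*dst);
		pos += sizeof(*dst);
	}
}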
|
@@ -12,7 +12,7 @@
#include <linux/interrupt.h>
#include <asm/reboot_fixups.h>
#include <asm/msr.h>
#include <asm/geode.h>
#include <linux/cs5535.h>

static void cs5530a_warm_reset(struct pci_dev *dev)
{
|
@@ -24,31 +24,6 @@

#include <asm/syscalls.h>

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
			  unsigned long prot, unsigned long flags,
			  unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file *file = NULL;
	struct mm_struct *mm = current->mm;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/i386 didn't use to be able to handle more than
@@ -77,7 +52,7 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
	if (a.offset & ~PAGE_MASK)
		goto out;

	err = sys_mmap2(a.addr, a.len, a.prot, a.flags,
	err = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags,
			a.fd, a.offset >> PAGE_SHIFT);
out:
	return err;
|
@@ -23,26 +23,11 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, fd, unsigned long, off)
{
	long error;
	struct file *file;

	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = -EBADF;
	file = NULL;
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}
|
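The wrappers above all collapse into one common entry point. A minimal sketch of that helper, assuming the generic implementation this series moves the logic into (the fget/do_mmap_pgoff/fput dance is lifted from the deleted arch copies):

	SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
			unsigned long, prot, unsigned long, flags,
			unsigned long, fd, unsigned long, pgoff)
	{
		struct file *file = NULL;
		unsigned long retval = -EBADF;

		flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
		if (!(flags & MAP_ANONYMOUS)) {
			file = fget(fd);	/* translate fd, may fail */
			if (!file)
				goto out;
		}

		down_write(&current->mm->mmap_sem);
		retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
		up_write(&current->mm->mmap_sem);

		if (file)
			fput(file);
	out:
		return retval;
	}

Each architecture then only converts its own calling convention (byte offset vs. page offset, argument struct vs. registers) before calling sys_mmap_pgoff().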
@@ -191,7 +191,7 @@ ENTRY(sys_call_table)
	.long sys_ni_syscall	/* reserved for streams2 */
	.long ptregs_vfork	/* 190 */
	.long sys_getrlimit
	.long sys_mmap2
	.long sys_mmap_pgoff
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64	/* 195 */
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static __cpuinitdata cycles_t last_tsc;
static __cpuinitdata cycles_t max_warp;
@@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void)
	 * previous TSC that was measured (possibly on
	 * another CPU) and update the previous TSC timestamp.
	 */
	__raw_spin_lock(&sync_lock);
	arch_spin_lock(&sync_lock);
	prev = last_tsc;
	rdtsc_barrier();
	now = get_cycles();
	rdtsc_barrier();
	last_tsc = now;
	__raw_spin_unlock(&sync_lock);
	arch_spin_unlock(&sync_lock);

	/*
	 * Be nice every now and then (and also check whether
@@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void)
	 * we saw a time-warp of the TSC going backwards:
	 */
	if (unlikely(prev > now)) {
		__raw_spin_lock(&sync_lock);
		arch_spin_lock(&sync_lock);
		max_warp = max(max_warp, prev - now);
		nr_warps++;
		__raw_spin_unlock(&sync_lock);
		arch_spin_unlock(&sync_lock);
	}
}
WARN(!(now-start),
@@ -54,4 +54,6 @@ EXPORT_SYMBOL(__memcpy);

EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(init_level4_pgt);
EXPORT_SYMBOL(load_gs_index);
#ifndef CONFIG_PARAVIRT
EXPORT_SYMBOL(native_load_gs_index);
#endif
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
static int svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *svm_data;
	struct svm_cpu_data *sd;
	uint64_t efer;
	struct descriptor_table gdt_descr;
	struct desc_struct *gdt;
@@ -331,63 +331,61 @@ static int svm_hardware_enable(void *garbage)
		       me);
		return -EINVAL;
	}
	svm_data = per_cpu(svm_data, me);
	sd = per_cpu(svm_data, me);

	if (!svm_data) {
	if (!sd) {
		printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
		       me);
		return -EINVAL;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;
	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;

	kvm_get_gdt(&gdt_descr);
	gdt = (struct desc_struct *)gdt_descr.base;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!svm_data)
	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(svm_data->save_area);
	kfree(svm_data);
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	struct svm_cpu_data *sd;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
	if (!sd->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;
	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(svm_data);
	kfree(sd);
	return r;

}
@@ -1092,16 +1090,16 @@ static void save_host_msrs(struct kvm_vcpu *vcpu)
#endif
}

static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
	svm->asid_generation = sd->asid_generation;
	svm->vmcb->control.asid = sd->next_asid++;
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
@@ -2429,8 +2427,8 @@ static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	sd->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

@@ -2438,12 +2436,12 @@ static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != svm_data->asid_generation)
		new_asid(svm, svm_data);
	if (svm->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}

static void svm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -5,7 +5,7 @@
inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
quiet_cmd_inat_tables = GEN     $@
      cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@
      cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@

$(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
	$(call cmd,inat_tables)
@@ -20,7 +20,7 @@ lib-y := delay.o
lib-y += thunk_$(BITS).o
lib-y += usercopy_$(BITS).o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
lib-y += insn.o inat.o
lib-$(CONFIG_KPROBES) += insn.o inat.o

obj-y += msr.o msr-reg.o msr-reg-export.o
@@ -704,9 +704,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
	if (file->f_flags & O_DSYNC)
		flags = _PAGE_CACHE_UC_MINUS;
	}

#ifdef CONFIG_X86_32
	/*
@@ -15,3 +15,8 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o

obj-y += common.o early.o
obj-y += amd_bus.o
obj-$(CONFIG_X86_64) += bus_numa.o intel_bus.o

ifeq ($(CONFIG_PCI_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
endif
@@ -7,6 +7,7 @@
#include <asm/pci_x86.h>

struct pci_root_info {
	struct acpi_device *bridge;
	char *name;
	unsigned int res_num;
	struct resource *res;
@@ -58,6 +59,30 @@ bus_has_transparent_bridge(struct pci_bus *bus)
	return false;
}

static void
align_resource(struct acpi_device *bridge, struct resource *res)
{
	int align = (res->flags & IORESOURCE_MEM) ? 16 : 4;

	/*
	 * Host bridge windows are not BARs, but the decoders on the PCI side
	 * that claim this address space have starting alignment and length
	 * constraints, so fix any obvious BIOS goofs.
	 */
	if (!IS_ALIGNED(res->start, align)) {
		dev_printk(KERN_DEBUG, &bridge->dev,
			   "host bridge window %pR invalid; "
			   "aligning start to %d-byte boundary\n", res, align);
		res->start &= ~(align - 1);
	}
	if (!IS_ALIGNED(res->end + 1, align)) {
		dev_printk(KERN_DEBUG, &bridge->dev,
			   "host bridge window %pR invalid; "
			   "aligning end to %d-byte boundary\n", res, align);
		res->end = ALIGN(res->end, align) - 1;
	}
}

static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
@@ -91,11 +116,12 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
	start = addr.minimum + addr.translation_offset;
	end = start + addr.address_length - 1;
	if (info->res_num >= max_root_bus_resources) {
		printk(KERN_WARNING "PCI: Failed to allocate 0x%lx-0x%lx "
			"from %s for %s due to _CRS returning more than "
			"%d resource descriptors\n", (unsigned long) start,
			(unsigned long) end, root->name, info->name,
			max_root_bus_resources);
		if (pci_probe & PCI_USE__CRS)
			printk(KERN_WARNING "PCI: Failed to allocate "
			       "0x%lx-0x%lx from %s for %s due to _CRS "
			       "returning more than %d resource descriptors\n",
			       (unsigned long) start, (unsigned long) end,
			       root->name, info->name, max_root_bus_resources);
		return AE_OK;
	}

@@ -105,14 +131,28 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
	res->start = start;
	res->end = end;
	res->child = NULL;
	align_resource(info->bridge, res);

	if (!(pci_probe & PCI_USE__CRS)) {
		dev_printk(KERN_DEBUG, &info->bridge->dev,
			   "host bridge window %pR (ignored)\n", res);
		return AE_OK;
	}

	if (insert_resource(root, res)) {
		printk(KERN_ERR "PCI: Failed to allocate 0x%lx-0x%lx "
			"from %s for %s\n", (unsigned long) res->start,
			(unsigned long) res->end, root->name, info->name);
		dev_err(&info->bridge->dev,
			"can't allocate host bridge window %pR\n", res);
	} else {
		info->bus->resource[info->res_num] = res;
		info->res_num++;
		if (addr.translation_offset)
			dev_info(&info->bridge->dev, "host bridge window %pR "
				 "(PCI address [%#llx-%#llx])\n",
				 res, res->start - addr.translation_offset,
				 res->end - addr.translation_offset);
		else
			dev_info(&info->bridge->dev,
				 "host bridge window %pR\n", res);
	}
	return AE_OK;
}
@@ -124,6 +164,12 @@ get_current_resources(struct acpi_device *device, int busnum,
	struct pci_root_info info;
	size_t size;

	if (!(pci_probe & PCI_USE__CRS))
		dev_info(&device->dev,
			 "ignoring host bridge windows from ACPI; "
			 "boot with \"pci=use_crs\" to use them\n");

	info.bridge = device;
	info.bus = bus;
	info.res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
@@ -163,8 +209,9 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
#endif

	if (domain && !pci_domains_supported) {
		printk(KERN_WARNING "PCI: Multiple domains not supported "
		       "(dom %d, bus %d)\n", domain, busnum);
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (multiple domains not supported)\n",
		       domain, busnum);
		return NULL;
	}

@@ -188,7 +235,8 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (out of memory)\n", domain, busnum);
		return NULL;
	}

@@ -209,9 +257,7 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
	} else {
		bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
		if (bus) {
			if (pci_probe & PCI_USE__CRS)
				get_current_resources(device, busnum, domain,
					bus);
			get_current_resources(device, busnum, domain, bus);
			bus->subordinate = pci_scan_child_bus(bus);
		}
	}
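The align_resource() helper added above is pure bit arithmetic. A standalone illustration of the rounding it performs, assuming the usual IS_ALIGNED/ALIGN definitions (userspace sketch, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		uint64_t start = 0x1001, end = 0x2000, align = 16;

		if (!IS_ALIGNED(start, align))
			start &= ~(align - 1);		/* round start down */
		if (!IS_ALIGNED(end + 1, align))
			end = ALIGN(end, align) - 1;	/* snap end to a boundary minus one */

		/* prints [0x1000, 0x1fff]: a 16-byte-granular window */
		printf("[%#llx, %#llx]\n",
		       (unsigned long long)start, (unsigned long long)end);
		return 0;
	}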
@@ -6,10 +6,10 @@

#ifdef CONFIG_X86_64
#include <asm/pci-direct.h>
#include <asm/mpspec.h>
#include <linux/cpumask.h>
#endif

#include "bus_numa.h"

/*
 * This discovers the pcibus <-> node mapping on AMD K8.
 * also get peer root bus resource for io,mmio
@@ -17,67 +17,6 @@

#ifdef CONFIG_X86_64

/*
 * sub bus (transparent) will use entres from 3 to store extra from root,
 * so need to make sure have enought slot there, increase PCI_BUS_NUM_RESOURCES?
 */
#define RES_NUM 16
struct pci_root_info {
	char name[12];
	unsigned int res_num;
	struct resource res[RES_NUM];
	int bus_min;
	int bus_max;
	int node;
	int link;
};

/* 4 at this time, it may become to 32 */
#define PCI_ROOT_NR 4
static int pci_root_num;
static struct pci_root_info pci_root_info[PCI_ROOT_NR];

void x86_pci_root_bus_res_quirks(struct pci_bus *b)
{
	int i;
	int j;
	struct pci_root_info *info;

	/* don't go for it if _CRS is used already */
	if (b->resource[0] != &ioport_resource ||
	    b->resource[1] != &iomem_resource)
		return;

	/* if only one root bus, don't need to anything */
	if (pci_root_num < 2)
		return;

	for (i = 0; i < pci_root_num; i++) {
		if (pci_root_info[i].bus_min == b->number)
			break;
	}

	if (i == pci_root_num)
		return;

	printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
			b->number);

	info = &pci_root_info[i];
	for (j = 0; j < info->res_num; j++) {
		struct resource *res;
		struct resource *root;

		res = &info->res[j];
		b->resource[j] = res;
		if (res->flags & IORESOURCE_IO)
			root = &ioport_resource;
		else
			root = &iomem_resource;
		insert_resource(root, res);
	}
}

#define RANGE_NUM 16

struct res_range {
@@ -130,52 +69,6 @@ static void __init update_range(struct res_range *range, size_t start,
	}
}

static void __init update_res(struct pci_root_info *info, size_t start,
			      size_t end, unsigned long flags, int merge)
{
	int i;
	struct resource *res;

	if (!merge)
		goto addit;

	/* try to merge it with old one */
	for (i = 0; i < info->res_num; i++) {
		size_t final_start, final_end;
		size_t common_start, common_end;

		res = &info->res[i];
		if (res->flags != flags)
			continue;

		common_start = max((size_t)res->start, start);
		common_end = min((size_t)res->end, end);
		if (common_start > common_end + 1)
			continue;

		final_start = min((size_t)res->start, start);
		final_end = max((size_t)res->end, end);

		res->start = final_start;
		res->end = final_end;
		return;
	}

addit:

	/* need to add that */
	if (info->res_num >= RES_NUM)
		return;

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;
	info->res_num++;
}

struct pci_hostbridge_probe {
	u32 bus;
	u32 slot;
@@ -230,7 +123,6 @@ static int __init early_fill_mp_bus_info(void)
	int j;
	unsigned bus;
	unsigned slot;
	int found;
	int node;
	int link;
	int def_node;
@@ -247,7 +139,7 @@ static int __init early_fill_mp_bus_info(void)
	if (!early_pci_allowed())
		return -1;

	found = 0;
	found_all_numa_early = 0;
	for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
		u32 id;
		u16 device;
@@ -261,12 +153,12 @@ static int __init early_fill_mp_bus_info(void)
		device = (id>>16) & 0xffff;
		if (pci_probes[i].vendor == vendor &&
		    pci_probes[i].device == device) {
			found = 1;
			found_all_numa_early = 1;
			break;
		}
	}

	if (!found)
	if (!found_all_numa_early)
		return 0;

	pci_root_num = 0;
@@ -488,7 +380,7 @@ static int __init early_fill_mp_bus_info(void)
	info = &pci_root_info[i];
	res_num = info->res_num;
	busnum = info->bus_min;
	printk(KERN_DEBUG "bus: [%02x,%02x] on node %x link %x\n",
	printk(KERN_DEBUG "bus: [%02x, %02x] on node %x link %x\n",
		info->bus_min, info->bus_max, info->node, info->link);
	for (j = 0; j < res_num; j++) {
		res = &info->res[j];
arch/x86/pci/bus_numa.c (new file, 101 lines)
@@ -0,0 +1,101 @@
#include <linux/init.h>
#include <linux/pci.h>

#include "bus_numa.h"

int pci_root_num;
struct pci_root_info pci_root_info[PCI_ROOT_NR];
int found_all_numa_early;

void x86_pci_root_bus_res_quirks(struct pci_bus *b)
{
	int i;
	int j;
	struct pci_root_info *info;

	/* don't go for it if _CRS is used already */
	if (b->resource[0] != &ioport_resource ||
	    b->resource[1] != &iomem_resource)
		return;

	if (!pci_root_num)
		return;

	/* for amd, if only one root bus, don't need to do anything */
	if (pci_root_num < 2 && found_all_numa_early)
		return;

	for (i = 0; i < pci_root_num; i++) {
		if (pci_root_info[i].bus_min == b->number)
			break;
	}

	if (i == pci_root_num)
		return;

	printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
			b->number);

	info = &pci_root_info[i];
	for (j = 0; j < info->res_num; j++) {
		struct resource *res;
		struct resource *root;

		res = &info->res[j];
		b->resource[j] = res;
		if (res->flags & IORESOURCE_IO)
			root = &ioport_resource;
		else
			root = &iomem_resource;
		insert_resource(root, res);
	}
}

void __init update_res(struct pci_root_info *info, size_t start,
		       size_t end, unsigned long flags, int merge)
{
	int i;
	struct resource *res;

	if (start > end)
		return;

	if (!merge)
		goto addit;

	/* try to merge it with old one */
	for (i = 0; i < info->res_num; i++) {
		size_t final_start, final_end;
		size_t common_start, common_end;

		res = &info->res[i];
		if (res->flags != flags)
			continue;

		common_start = max((size_t)res->start, start);
		common_end = min((size_t)res->end, end);
		if (common_start > common_end + 1)
			continue;

		final_start = min((size_t)res->start, start);
		final_end = max((size_t)res->end, end);

		res->start = final_start;
		res->end = final_end;
		return;
	}

addit:

	/* need to add that */
	if (info->res_num >= RES_NUM)
		return;

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;
	info->res_num++;
}
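The merge test in update_res() treats ranges that overlap or merely touch as one; the disjoint case is common_start > common_end + 1. A small standalone check of that logic (illustrative values, not taken from the diff):

	#include <stdio.h>
	#include <stddef.h>

	#define max(a, b)	((a) > (b) ? (a) : (b))
	#define min(a, b)	((a) < (b) ? (a) : (b))

	int main(void)
	{
		size_t res_start = 0x1000, res_end = 0x1fff;	/* existing */
		size_t start = 0x2000, end = 0x2fff;		/* new, adjacent */

		size_t common_start = max(res_start, start);	/* 0x2000 */
		size_t common_end = min(res_end, end);		/* 0x1fff */

		if (common_start > common_end + 1)
			printf("disjoint, append as a new entry\n");
		else	/* taken: 0x2000 > 0x2000 is false */
			printf("merged: [%#zx, %#zx]\n",
			       min(res_start, start), max(res_end, end));
		return 0;
	}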
arch/x86/pci/bus_numa.h (new file, 27 lines)
@@ -0,0 +1,27 @@
#ifdef CONFIG_X86_64

/*
 * sub bus (transparent) will use entres from 3 to store extra from
 * root, so need to make sure we have enough slot there, Should we
 * increase PCI_BUS_NUM_RESOURCES?
 */
#define RES_NUM 16
struct pci_root_info {
	char name[12];
	unsigned int res_num;
	struct resource res[RES_NUM];
	int bus_min;
	int bus_max;
	int node;
	int link;
};

/* 4 at this time, it may become to 32 */
#define PCI_ROOT_NR 4
extern int pci_root_num;
extern struct pci_root_info pci_root_info[PCI_ROOT_NR];
extern int found_all_numa_early;

extern void update_res(struct pci_root_info *info, size_t start,
			size_t end, unsigned long flags, int merge);
#endif
@@ -410,8 +410,6 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
	return bus;
}

extern u8 pci_cache_line_size;

int __init pcibios_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -422,15 +420,19 @@ int __init pcibios_init(void)
	}

	/*
	 * Assume PCI cacheline size of 32 bytes for all x86s except K7/K8
	 * and P4. It's also good for 386/486s (which actually have 16)
	 * Set PCI cacheline size to that of the CPU if the CPU has reported it.
	 * (For older CPUs that don't support cpuid, we set it to 32 bytes.)
	 * It's also good for 386/486s (which actually have 16)
	 * as quite a few PCI devices do not support smaller values.
	 */
	pci_cache_line_size = 32 >> 2;
	if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
		pci_cache_line_size = 64 >> 2;	/* K7 & K8 */
	else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
		pci_cache_line_size = 128 >> 2;	/* P4 */
	if (c->x86_clflush_size > 0) {
		pci_dfl_cache_line_size = c->x86_clflush_size >> 2;
		printk(KERN_DEBUG "PCI: pci_cache_line_size set to %d bytes\n",
			pci_dfl_cache_line_size << 2);
	} else {
		pci_dfl_cache_line_size = 32 >> 2;
		printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
	}

	pcibios_resource_survey();
@@ -12,8 +12,6 @@ u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset)
	u32 v;
	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
	v = inl(0xcfc);
	if (v != 0xffffffff)
		pr_debug("%x reading 4 from %x: %x\n", slot, offset, v);
	return v;
}

@@ -22,7 +20,6 @@ u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset)
	u8 v;
	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
	v = inb(0xcfc + (offset&3));
	pr_debug("%x reading 1 from %x: %x\n", slot, offset, v);
	return v;
}

@@ -31,28 +28,24 @@ u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset)
	u16 v;
	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
	v = inw(0xcfc + (offset&2));
	pr_debug("%x reading 2 from %x: %x\n", slot, offset, v);
	return v;
}

void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
				    u32 val)
{
	pr_debug("%x writing to %x: %x\n", slot, offset, val);
	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
	outl(val, 0xcfc);
}

void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val)
{
	pr_debug("%x writing to %x: %x\n", slot, offset, val);
	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
	outb(val, 0xcfc + (offset&3));
}

void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val)
{
	pr_debug("%x writing to %x: %x\n", slot, offset, val);
	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
	outw(val, 0xcfc + (offset&2));
}
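All of the accesses above use the legacy type-1 configuration mechanism: an address dword written to port 0xCF8 selects bus/device/function/register, then the data appears at 0xCFC. A sketch of the encoding (the canonical form masks the offset to a dword boundary; the sub-dword byte is selected on the data port, as the inb(0xcfc + (offset & 3)) above does):

	#include <stdint.h>

	static inline uint32_t pci_conf1_address(uint8_t bus, uint8_t slot,
						 uint8_t func, uint8_t offset)
	{
		return 0x80000000u		/* enable bit */
		       | ((uint32_t)bus << 16)
		       | ((uint32_t)slot << 11)
		       | ((uint32_t)func << 8)
		       | (offset & 0xfc);	/* dword-aligned register */
	}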
@@ -129,7 +129,9 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
				continue;
			if (!r->start ||
			    pci_claim_resource(dev, idx) < 0) {
				dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx);
				dev_info(&dev->dev,
					 "can't reserve window %pR\n",
					 r);
				/*
				 * Something is wrong with the region.
				 * Invalidate the resource to prevent
@@ -144,16 +146,29 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
	}
}

struct pci_check_idx_range {
	int start;
	int end;
};

static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	int idx, disabled, i;
	u16 command;
	struct resource *r;

	struct pci_check_idx_range idx_range[] = {
		{ PCI_STD_RESOURCES, PCI_STD_RESOURCE_END },
#ifdef CONFIG_PCI_IOV
		{ PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END },
#endif
	};

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
		for (i = 0; i < ARRAY_SIZE(idx_range); i++)
		    for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
			r = &dev->resource[idx];
			if (r->parent)	/* Already allocated */
				continue;
@@ -164,12 +179,12 @@ static void __init pcibios_allocate_resources(int pass)
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled) {
				dev_dbg(&dev->dev, "resource %#08llx-%#08llx (f=%lx, d=%d, p=%d)\n",
					(unsigned long long) r->start,
					(unsigned long long) r->end,
					r->flags, disabled, pass);
				dev_dbg(&dev->dev,
					"BAR %d: reserving %pr (d=%d, p=%d)\n",
					idx, r, disabled, pass);
				if (pci_claim_resource(dev, idx) < 0) {
					dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx);
					dev_info(&dev->dev,
						 "can't reserve %pR\n", r);
					/* We'll assign a new address later */
					r->end -= r->start;
					r->start = 0;
@@ -182,7 +197,7 @@ static void __init pcibios_allocate_resources(int pass)
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered. */
			u32 reg;
			dev_dbg(&dev->dev, "disabling ROM\n");
			dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
			r->flags &= ~IORESOURCE_ROM_ENABLE;
			pci_read_config_dword(dev,
					      dev->rom_base_reg, &reg);
@@ -282,6 +297,15 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
		return -EINVAL;

	prot = pgprot_val(vma->vm_page_prot);

	/*
	 * Return error if pat is not enabled and write_combine is requested.
	 * Caller can followup with UC MINUS request and add a WC mtrr if there
	 * is a free mtrr slot.
	 */
	if (!pat_enabled && write_combine)
		return -EINVAL;

	if (pat_enabled && write_combine)
		prot |= _PAGE_CACHE_WC;
	else if (pat_enabled || boot_cpu_data.x86 > 3)
arch/x86/pci/intel_bus.c (new file, 90 lines)
@@ -0,0 +1,90 @@
/*
 * to read io range from IOH pci conf, need to do it after mmconfig is there
 */

#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/pci_x86.h>

#include "bus_numa.h"

static inline void print_ioh_resources(struct pci_root_info *info)
{
	int res_num;
	int busnum;
	int i;

	printk(KERN_DEBUG "IOH bus: [%02x, %02x]\n",
		info->bus_min, info->bus_max);
	res_num = info->res_num;
	busnum = info->bus_min;
	for (i = 0; i < res_num; i++) {
		struct resource *res;

		res = &info->res[i];
		printk(KERN_DEBUG "IOH bus: %02x index %x %s: [%llx, %llx]\n",
			busnum, i,
			(res->flags & IORESOURCE_IO) ? "io port" :
							"mmio",
			res->start, res->end);
	}
}

#define IOH_LIO			0x108
#define IOH_LMMIOL		0x10c
#define IOH_LMMIOH		0x110
#define IOH_LMMIOH_BASEU	0x114
#define IOH_LMMIOH_LIMITU	0x118
#define IOH_LCFGBUS		0x11c

static void __devinit pci_root_bus_res(struct pci_dev *dev)
{
	u16 word;
	u32 dword;
	struct pci_root_info *info;
	u16 io_base, io_end;
	u32 mmiol_base, mmiol_end;
	u64 mmioh_base, mmioh_end;
	int bus_base, bus_end;

	if (pci_root_num >= PCI_ROOT_NR) {
		printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n");
		return;
	}

	info = &pci_root_info[pci_root_num];
	pci_root_num++;

	pci_read_config_word(dev, IOH_LCFGBUS, &word);
	bus_base = (word & 0xff);
	bus_end = (word & 0xff00) >> 8;
	sprintf(info->name, "PCI Bus #%02x", bus_base);
	info->bus_min = bus_base;
	info->bus_max = bus_end;

	pci_read_config_word(dev, IOH_LIO, &word);
	io_base = (word & 0xf0) << (12 - 4);
	io_end = (word & 0xf000) | 0xfff;
	update_res(info, io_base, io_end, IORESOURCE_IO, 0);

	pci_read_config_dword(dev, IOH_LMMIOL, &dword);
	mmiol_base = (dword & 0xff00) << (24 - 8);
	mmiol_end = (dword & 0xff000000) | 0xffffff;
	update_res(info, mmiol_base, mmiol_end, IORESOURCE_MEM, 0);

	pci_read_config_dword(dev, IOH_LMMIOH, &dword);
	mmioh_base = ((u64)(dword & 0xfc00)) << (26 - 10);
	mmioh_end = ((u64)(dword & 0xfc000000) | 0x3ffffff);
	pci_read_config_dword(dev, IOH_LMMIOH_BASEU, &dword);
	mmioh_base |= ((u64)(dword & 0x7ffff)) << 32;
	pci_read_config_dword(dev, IOH_LMMIOH_LIMITU, &dword);
	mmioh_end |= ((u64)(dword & 0x7ffff)) << 32;
	update_res(info, mmioh_base, mmioh_end, IORESOURCE_MEM, 0);

	print_ioh_resources(info);
}

/* intel IOH */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, pci_root_bus_res);
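The register decodes in pci_root_bus_res() are easiest to follow with a concrete value. Taking the IOH_LIO field as an example (made-up register contents, for illustration only):

	/*
	 * word = 0x3020 read from IOH_LIO:
	 *   io_base = (word & 0x00f0) << (12 - 4) = 0x20 << 8      = 0x2000
	 *   io_end  = (word & 0xf000) | 0xfff     = 0x3000 | 0xfff = 0x3fff
	 * so this IOH would forward I/O ports [0x2000, 0x3fff]; the low
	 * bits are implied because the hardware decodes in 4 KB chunks.
	 */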
@@ -15,48 +15,98 @@
#include <linux/acpi.h>
#include <linux/sfi_acpi.h>
#include <linux/bitmap.h>
#include <linux/sort.h>
#include <linux/dmi.h>
#include <asm/e820.h>
#include <asm/pci_x86.h>
#include <asm/acpi.h>

#define PREFIX "PCI: "

/* aperture is up to 256MB but BIOS may reserve less */
#define MMCONFIG_APER_MIN	(2 * 1024*1024)
#define MMCONFIG_APER_MAX	(256 * 1024*1024)

/* Indicate if the mmcfg resources have been placed into the resource table. */
static int __initdata pci_mmcfg_resources_inserted;

static __init int extend_mmcfg(int num)
LIST_HEAD(pci_mmcfg_list);

static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
{
	struct acpi_mcfg_allocation *new;
	int new_num = pci_mmcfg_config_num + num;

	new = kzalloc(sizeof(pci_mmcfg_config[0]) * new_num, GFP_KERNEL);
	if (!new)
		return -1;

	if (pci_mmcfg_config) {
		memcpy(new, pci_mmcfg_config,
		       sizeof(pci_mmcfg_config[0]) * new_num);
		kfree(pci_mmcfg_config);
	}
	pci_mmcfg_config = new;

	return 0;
	if (cfg->res.parent)
		release_resource(&cfg->res);
	list_del(&cfg->list);
	kfree(cfg);
}

static __init void fill_one_mmcfg(u64 addr, int segment, int start, int end)
static __init void free_all_mmcfg(void)
{
	int i = pci_mmcfg_config_num;
	struct pci_mmcfg_region *cfg, *tmp;

	pci_mmcfg_config_num++;
	pci_mmcfg_config[i].address = addr;
	pci_mmcfg_config[i].pci_segment = segment;
	pci_mmcfg_config[i].start_bus_number = start;
	pci_mmcfg_config[i].end_bus_number = end;
	pci_mmcfg_arch_free();
	list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
		pci_mmconfig_remove(cfg);
}

static __init void list_add_sorted(struct pci_mmcfg_region *new)
{
	struct pci_mmcfg_region *cfg;

	/* keep list sorted by segment and starting bus number */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		if (cfg->segment > new->segment ||
		    (cfg->segment == new->segment &&
		     cfg->start_bus >= new->start_bus)) {
			list_add_tail(&new->list, &cfg->list);
			return;
		}
	}
	list_add_tail(&new->list, &pci_mmcfg_list);
}

static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
							int end, u64 addr)
{
	struct pci_mmcfg_region *new;
	int num_buses;
	struct resource *res;

	if (addr == 0)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->address = addr;
	new->segment = segment;
	new->start_bus = start;
	new->end_bus = end;

	list_add_sorted(new);

	num_buses = end - start + 1;
	res = &new->res;
	res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
	res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
		 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
	res->name = new->name;

	printk(KERN_INFO PREFIX "MMCONFIG for domain %04x [bus %02x-%02x] at "
	       "%pR (base %#lx)\n", segment, start, end, &new->res,
	       (unsigned long) addr);

	return new;
}

struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
{
	struct pci_mmcfg_region *cfg;

	list_for_each_entry(cfg, &pci_mmcfg_list, list)
		if (cfg->segment == segment &&
		    cfg->start_bus <= bus && bus <= cfg->end_bus)
			return cfg;

	return NULL;
}

static const char __init *pci_mmcfg_e7520(void)
@@ -68,11 +118,9 @@ static const char __init *pci_mmcfg_e7520(void)
	if (win == 0x0000 || win == 0xf000)
		return NULL;

	if (extend_mmcfg(1) == -1)
	if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL)
		return NULL;

	fill_one_mmcfg(win << 16, 0, 0, 255);

	return "Intel Corporation E7520 Memory Controller Hub";
}

@@ -114,11 +162,9 @@ static const char __init *pci_mmcfg_intel_945(void)
	if ((pciexbar & mask) >= 0xf0000000U)
		return NULL;

	if (extend_mmcfg(1) == -1)
	if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL)
		return NULL;

	fill_one_mmcfg(pciexbar & mask, 0, 0, (len >> 20) - 1);

	return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
}

@@ -127,7 +173,7 @@ static const char __init *pci_mmcfg_amd_fam10h(void)
	u32 low, high, address;
	u64 base, msr;
	int i;
	unsigned segnbits = 0, busnbits;
	unsigned segnbits = 0, busnbits, end_bus;

	if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
		return NULL;
@@ -161,11 +207,13 @@ static const char __init *pci_mmcfg_amd_fam10h(void)
		busnbits = 8;
	}

	if (extend_mmcfg(1 << segnbits) == -1)
		return NULL;

	end_bus = (1 << busnbits) - 1;
	for (i = 0; i < (1 << segnbits); i++)
		fill_one_mmcfg(base + (1<<28) * i, i, 0, (1 << busnbits) - 1);
		if (pci_mmconfig_add(i, 0, end_bus,
				     base + (1<<28) * i) == NULL) {
			free_all_mmcfg();
			return NULL;
		}

	return "AMD Family 10h NB";
}
@@ -190,7 +238,7 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void)
	/*
	 * do check if amd fam10h already took over
	 */
	if (!acpi_disabled || pci_mmcfg_config_num || mcp55_checked)
	if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked)
		return NULL;

	mcp55_checked = true;
@@ -213,16 +261,14 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void)
		if (!(extcfg & extcfg_enable_mask))
			continue;

		if (extend_mmcfg(1) == -1)
			continue;

		size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift;
		base = extcfg & extcfg_base_mask[size_index];
		/* base could > 4G */
		base <<= extcfg_base_lshift;
		start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
		end = start + extcfg_sizebus[size_index] - 1;
		fill_one_mmcfg(base, 0, start, end);
		if (pci_mmconfig_add(0, start, end, base) == NULL)
			continue;
		mcp55_mmconf_found++;
	}

@@ -253,45 +299,27 @@ static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = {
	  0x0369, pci_mmcfg_nvidia_mcp55 },
};

static int __init cmp_mmcfg(const void *x1, const void *x2)
{
	const typeof(pci_mmcfg_config[0]) *m1 = x1;
	const typeof(pci_mmcfg_config[0]) *m2 = x2;
	int start1, start2;

	start1 = m1->start_bus_number;
	start2 = m2->start_bus_number;

	return start1 - start2;
}

static void __init pci_mmcfg_check_end_bus_number(void)
{
	int i;
	typeof(pci_mmcfg_config[0]) *cfg, *cfgx;

	/* sort them at first */
	sort(pci_mmcfg_config, pci_mmcfg_config_num,
	     sizeof(pci_mmcfg_config[0]), cmp_mmcfg, NULL);
	struct pci_mmcfg_region *cfg, *cfgx;

	/* last one*/
	if (pci_mmcfg_config_num > 0) {
		i = pci_mmcfg_config_num - 1;
		cfg = &pci_mmcfg_config[i];
		if (cfg->end_bus_number < cfg->start_bus_number)
			cfg->end_bus_number = 255;
	}
	cfg = list_entry(pci_mmcfg_list.prev, typeof(*cfg), list);
	if (cfg)
		if (cfg->end_bus < cfg->start_bus)
			cfg->end_bus = 255;

	if (list_is_singular(&pci_mmcfg_list))
		return;

	/* don't overlap please */
	for (i = 0; i < pci_mmcfg_config_num - 1; i++) {
		cfg = &pci_mmcfg_config[i];
		cfgx = &pci_mmcfg_config[i+1];
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		if (cfg->end_bus < cfg->start_bus)
			cfg->end_bus = 255;

		if (cfg->end_bus_number < cfg->start_bus_number)
			cfg->end_bus_number = 255;

		if (cfg->end_bus_number >= cfgx->start_bus_number)
			cfg->end_bus_number = cfgx->start_bus_number - 1;
		cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
		if (cfg != cfgx && cfg->end_bus >= cfgx->start_bus)
			cfg->end_bus = cfgx->start_bus - 1;
	}
}

@@ -306,8 +334,7 @@ static int __init pci_mmcfg_check_hostbridge(void)
	if (!raw_pci_ops)
		return 0;

	pci_mmcfg_config_num = 0;
	pci_mmcfg_config = NULL;
	free_all_mmcfg();

	for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
		bus = pci_mmcfg_probes[i].bus;
@@ -322,45 +349,22 @@ static int __init pci_mmcfg_check_hostbridge(void)
		name = pci_mmcfg_probes[i].probe();

		if (name)
			printk(KERN_INFO "PCI: Found %s with MMCONFIG support.\n",
			printk(KERN_INFO PREFIX "%s with MMCONFIG support\n",
			       name);
	}

	/* some end_bus_number is crazy, fix it */
	pci_mmcfg_check_end_bus_number();

	return pci_mmcfg_config_num != 0;
	return !list_empty(&pci_mmcfg_list);
}

static void __init pci_mmcfg_insert_resources(void)
{
#define PCI_MMCFG_RESOURCE_NAME_LEN 24
	int i;
	struct resource *res;
	char *names;
	unsigned num_buses;
	struct pci_mmcfg_region *cfg;

	res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res),
			pci_mmcfg_config_num, GFP_KERNEL);
	if (!res) {
		printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n");
		return;
	}

	names = (void *)&res[pci_mmcfg_config_num];
	for (i = 0; i < pci_mmcfg_config_num; i++, res++) {
		struct acpi_mcfg_allocation *cfg = &pci_mmcfg_config[i];
		num_buses = cfg->end_bus_number - cfg->start_bus_number + 1;
		res->name = names;
		snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN,
			 "PCI MMCONFIG %u [%02x-%02x]", cfg->pci_segment,
			 cfg->start_bus_number, cfg->end_bus_number);
		res->start = cfg->address + (cfg->start_bus_number << 20);
		res->end = res->start + (num_buses << 20) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		insert_resource(&iomem_resource, res);
		names += PCI_MMCFG_RESOURCE_NAME_LEN;
	}
	list_for_each_entry(cfg, &pci_mmcfg_list, list)
		insert_resource(&iomem_resource, &cfg->res);

	/* Mark that the resources have been inserted. */
	pci_mmcfg_resources_inserted = 1;
@@ -437,11 +441,12 @@ static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used)
typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type);

static int __init is_mmconf_reserved(check_reserved_t is_reserved,
				     u64 addr, u64 size, int i,
				     typeof(pci_mmcfg_config[0]) *cfg, int with_e820)
				     struct pci_mmcfg_region *cfg, int with_e820)
{
	u64 addr = cfg->res.start;
	u64 size = resource_size(&cfg->res);
	u64 old_size = size;
	int valid = 0;
	int valid = 0, num_buses;

	while (!is_reserved(addr, addr + size, E820_RESERVED)) {
		size >>= 1;
@@ -450,19 +455,25 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
	}

	if (size >= (16UL<<20) || size == old_size) {
		printk(KERN_NOTICE
		       "PCI: MCFG area at %Lx reserved in %s\n",
			addr, with_e820?"E820":"ACPI motherboard resources");
		printk(KERN_INFO PREFIX "MMCONFIG at %pR reserved in %s\n",
		       &cfg->res,
		       with_e820 ? "E820" : "ACPI motherboard resources");
		valid = 1;

		if (old_size != size) {
			/* update end_bus_number */
			cfg->end_bus_number = cfg->start_bus_number + ((size>>20) - 1);
			printk(KERN_NOTICE "PCI: updated MCFG configuration %d: base %lx "
			       "segment %hu buses %u - %u\n",
			       i, (unsigned long)cfg->address, cfg->pci_segment,
			       (unsigned int)cfg->start_bus_number,
			       (unsigned int)cfg->end_bus_number);
			/* update end_bus */
			cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
			num_buses = cfg->end_bus - cfg->start_bus + 1;
			cfg->res.end = cfg->res.start +
			    PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
			snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
				 "PCI MMCONFIG %04x [bus %02x-%02x]",
				 cfg->segment, cfg->start_bus, cfg->end_bus);
			printk(KERN_INFO PREFIX
			       "MMCONFIG for %04x [bus%02x-%02x] "
			       "at %pR (base %#lx) (size reduced!)\n",
			       cfg->segment, cfg->start_bus, cfg->end_bus,
			       &cfg->res, (unsigned long) cfg->address);
		}
	}

@@ -471,45 +482,26 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,

static void __init pci_mmcfg_reject_broken(int early)
{
	typeof(pci_mmcfg_config[0]) *cfg;
	int i;
	struct pci_mmcfg_region *cfg;

	if ((pci_mmcfg_config_num == 0) ||
	    (pci_mmcfg_config == NULL) ||
	    (pci_mmcfg_config[0].address == 0))
		return;

	for (i = 0; i < pci_mmcfg_config_num; i++) {
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		int valid = 0;
		u64 addr, size;

		cfg = &pci_mmcfg_config[i];
		addr = cfg->start_bus_number;
		addr <<= 20;
		addr += cfg->address;
		size = cfg->end_bus_number + 1 - cfg->start_bus_number;
		size <<= 20;
		printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx "
		       "segment %hu buses %u - %u\n",
		       i, (unsigned long)cfg->address, cfg->pci_segment,
		       (unsigned int)cfg->start_bus_number,
		       (unsigned int)cfg->end_bus_number);

		if (!early && !acpi_disabled)
			valid = is_mmconf_reserved(is_acpi_reserved, addr, size, i, cfg, 0);
			valid = is_mmconf_reserved(is_acpi_reserved, cfg, 0);

		if (valid)
			continue;

		if (!early)
			printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not"
			       " reserved in ACPI motherboard resources\n",
			       cfg->address);
			printk(KERN_ERR FW_BUG PREFIX
			       "MMCONFIG at %pR not reserved in "
			       "ACPI motherboard resources\n", &cfg->res);

		/* Don't try to do this check unless configuration
		   type 1 is available. how about type 2 ?*/
		if (raw_pci_ops)
			valid = is_mmconf_reserved(e820_all_mapped, addr, size, i, cfg, 1);
			valid = is_mmconf_reserved(e820_all_mapped, cfg, 1);

		if (!valid)
			goto reject;
@@ -518,34 +510,41 @@ static void __init pci_mmcfg_reject_broken(int early)
	return;

reject:
	printk(KERN_INFO "PCI: Not using MMCONFIG.\n");
	pci_mmcfg_arch_free();
	kfree(pci_mmcfg_config);
	pci_mmcfg_config = NULL;
	pci_mmcfg_config_num = 0;
	printk(KERN_INFO PREFIX "not using MMCONFIG\n");
	free_all_mmcfg();
}

static int __initdata known_bridge;

static int acpi_mcfg_64bit_base_addr __initdata = FALSE;

/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
struct acpi_mcfg_allocation *pci_mmcfg_config;
int pci_mmcfg_config_num;

static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
					struct acpi_mcfg_allocation *cfg)
{
	if (!strcmp(mcfg->header.oem_id, "SGI"))
		acpi_mcfg_64bit_base_addr = TRUE;
	int year;

	return 0;
	if (cfg->address < 0xFFFFFFFF)
		return 0;

	if (!strcmp(mcfg->header.oem_id, "SGI"))
		return 0;

	if (mcfg->header.revision >= 1) {
		if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
		    year >= 2010)
			return 0;
	}

	printk(KERN_ERR PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
	       "is above 4GB, ignored\n", cfg->pci_segment,
	       cfg->start_bus_number, cfg->end_bus_number, cfg->address);
	return -EINVAL;
}

static int __init pci_parse_mcfg(struct acpi_table_header *header)
{
	struct acpi_table_mcfg *mcfg;
	struct acpi_mcfg_allocation *cfg_table, *cfg;
	unsigned long i;
	int config_size;
	int entries;

	if (!header)
		return -EINVAL;
@@ -553,38 +552,33 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
	mcfg = (struct acpi_table_mcfg *)header;

	/* how many config structures do we have */
	pci_mmcfg_config_num = 0;
	free_all_mmcfg();
	entries = 0;
	i = header->length - sizeof(struct acpi_table_mcfg);
	while (i >= sizeof(struct acpi_mcfg_allocation)) {
		++pci_mmcfg_config_num;
		entries++;
		i -= sizeof(struct acpi_mcfg_allocation);
	};
	if (pci_mmcfg_config_num == 0) {
	if (entries == 0) {
		printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
		return -ENODEV;
	}

	config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
	pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
	if (!pci_mmcfg_config) {
		printk(KERN_WARNING PREFIX
		       "No memory for MCFG config tables\n");
		return -ENOMEM;
	}

	memcpy(pci_mmcfg_config, &mcfg[1], config_size);

	acpi_mcfg_oem_check(mcfg);

	for (i = 0; i < pci_mmcfg_config_num; ++i) {
		if ((pci_mmcfg_config[i].address > 0xFFFFFFFF) &&
		    !acpi_mcfg_64bit_base_addr) {
			printk(KERN_ERR PREFIX
			       "MMCONFIG not in low 4GB of memory\n");
			kfree(pci_mmcfg_config);
			pci_mmcfg_config_num = 0;
	cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
	for (i = 0; i < entries; i++) {
		cfg = &cfg_table[i];
		if (acpi_mcfg_check_entry(mcfg, cfg)) {
			free_all_mmcfg();
			return -ENODEV;
		}

		if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
				   cfg->end_bus_number, cfg->address) == NULL) {
			printk(KERN_WARNING PREFIX
			       "no memory for MCFG entries\n");
			free_all_mmcfg();
			return -ENOMEM;
		}
	}

	return 0;
@@ -614,9 +608,7 @@ static void __init __pci_mmcfg_init(int early)

	pci_mmcfg_reject_broken(early);

	if ((pci_mmcfg_config_num == 0) ||
	    (pci_mmcfg_config == NULL) ||
	    (pci_mmcfg_config[0].address == 0))
	if (list_empty(&pci_mmcfg_list))
		return;

	if (pci_mmcfg_arch_init())
@@ -648,9 +640,7 @@ static int __init pci_mmcfg_late_insert_resources(void)
	 */
	if ((pci_mmcfg_resources_inserted == 1) ||
	    (pci_probe & PCI_PROBE_MMCONF) == 0 ||
	    (pci_mmcfg_config_num == 0) ||
	    (pci_mmcfg_config == NULL) ||
	    (pci_mmcfg_config[0].address == 0))
	    list_empty(&pci_mmcfg_list))
		return 1;

	/*
@@ -27,18 +27,10 @@ static int mmcfg_last_accessed_cpu;
 */
static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
{
	struct acpi_mcfg_allocation *cfg;
	int cfg_num;
	struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);

	for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) {
		cfg = &pci_mmcfg_config[cfg_num];
		if (cfg->pci_segment == seg &&
		    (cfg->start_bus_number <= bus) &&
		    (cfg->end_bus_number >= bus))
			return cfg->address;
	}

	/* Fall back to type 0 */
	if (cfg)
		return cfg->address;
	return 0;
}

@@ -47,7 +39,7 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
 */
static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
{
	u32 dev_base = base | (bus << 20) | (devfn << 12);
	u32 dev_base = base | PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12);
	int cpu = smp_processor_id();
	if (dev_base != mmcfg_last_accessed_device ||
	    cpu != mmcfg_last_accessed_cpu) {
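PCI_MMCFG_BUS_OFFSET(bus) replaces the open-coded bus << 20 above: each bus owns 1 MB of MMCONFIG space (256 devfns x 4 KB). A worked example, assuming the macro definition introduced by this series:

	#define PCI_MMCFG_BUS_OFFSET(bus)	((bus) << 20)

	/*
	 * bus 2, device 3, function 1, register 0x10:
	 *   devfn  = (3 << 3) | 1 = 0x19
	 *   offset = PCI_MMCFG_BUS_OFFSET(2) | (0x19 << 12) | 0x10
	 *          = 0x200000 | 0x19000 | 0x10 = 0x219010
	 */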
@@ -12,38 +12,15 @@
#include <asm/e820.h>
#include <asm/pci_x86.h>

/* Static virtual mapping of the MMCONFIG aperture */
struct mmcfg_virt {
	struct acpi_mcfg_allocation *cfg;
	char __iomem *virt;
};
static struct mmcfg_virt *pci_mmcfg_virt;

static char __iomem *get_virt(unsigned int seg, unsigned bus)
{
	struct acpi_mcfg_allocation *cfg;
	int cfg_num;

	for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) {
		cfg = pci_mmcfg_virt[cfg_num].cfg;
		if (cfg->pci_segment == seg &&
		    (cfg->start_bus_number <= bus) &&
		    (cfg->end_bus_number >= bus))
			return pci_mmcfg_virt[cfg_num].virt;
	}

	/* Fall back to type 0 */
	return NULL;
}
#define PREFIX "PCI: "

static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
{
	char __iomem *addr;
	struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);

	addr = get_virt(seg, bus);
	if (!addr)
		return NULL;
	return addr + ((bus << 20) | (devfn << 12));
	if (cfg && cfg->virt)
		return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12));
	return NULL;
}

static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
@@ -109,42 +86,30 @@ static struct pci_raw_ops pci_mmcfg = {
	.write = pci_mmcfg_write,
};

static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg)
static void __iomem * __init mcfg_ioremap(struct pci_mmcfg_region *cfg)
{
	void __iomem *addr;
	u64 start, size;
	int num_buses;

	start = cfg->start_bus_number;
	start <<= 20;
	start += cfg->address;
	size = cfg->end_bus_number + 1 - cfg->start_bus_number;
	size <<= 20;
	start = cfg->address + PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
	num_buses = cfg->end_bus - cfg->start_bus + 1;
	size = PCI_MMCFG_BUS_OFFSET(num_buses);
	addr = ioremap_nocache(start, size);
	if (addr) {
		printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n",
		       start, start + size - 1);
		addr -= cfg->start_bus_number << 20;
	}
	if (addr)
		addr -= PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
	return addr;
}

int __init pci_mmcfg_arch_init(void)
{
	int i;
	pci_mmcfg_virt = kzalloc(sizeof(*pci_mmcfg_virt) *
				 pci_mmcfg_config_num, GFP_KERNEL);
	if (pci_mmcfg_virt == NULL) {
		printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n");
		return 0;
	}
	struct pci_mmcfg_region *cfg;

	for (i = 0; i < pci_mmcfg_config_num; ++i) {
		pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
		pci_mmcfg_virt[i].virt = mcfg_ioremap(&pci_mmcfg_config[i]);
		if (!pci_mmcfg_virt[i].virt) {
			printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
					"segment %d\n",
				pci_mmcfg_config[i].pci_segment);
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		cfg->virt = mcfg_ioremap(cfg);
		if (!cfg->virt) {
			printk(KERN_ERR PREFIX "can't map MMCONFIG at %pR\n",
			       &cfg->res);
			pci_mmcfg_arch_free();
			return 0;
		}
@@ -155,19 +120,12 @@ int __init pci_mmcfg_arch_init(void)

void __init pci_mmcfg_arch_free(void)
{
	int i;
	struct pci_mmcfg_region *cfg;

	if (pci_mmcfg_virt == NULL)
		return;

	for (i = 0; i < pci_mmcfg_config_num; ++i) {
		if (pci_mmcfg_virt[i].virt) {
			iounmap(pci_mmcfg_virt[i].virt + (pci_mmcfg_virt[i].cfg->start_bus_number << 20));
			pci_mmcfg_virt[i].virt = NULL;
			pci_mmcfg_virt[i].cfg = NULL;
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		if (cfg->virt) {
			iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus));
			cfg->virt = NULL;
		}
	}

	kfree(pci_mmcfg_virt);
	pci_mmcfg_virt = NULL;
}
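Note the bias trick mcfg_ioremap() keeps from the old code: the returned pointer is shifted down by PCI_MMCFG_BUS_OFFSET(start_bus), so callers can always add the absolute bus offset. A sketch with made-up numbers:

	/*
	 * Suppose a region covers buses 0x40-0x7f and its first mapped
	 * byte (bus 0x40's config space) sits at phys 0xe0000000.
	 * After addr -= PCI_MMCFG_BUS_OFFSET(0x40), an access to bus 0x42
	 * computes virt + (0x42 << 20) and lands 2 MB into the real
	 * mapping, so pci_dev_base() never has to subtract start_bus.
	 */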
@@ -113,7 +113,7 @@ int main(int argc, char **argv)
	char line[BUFSIZE], sym[BUFSIZE] = "<unknown>";
	unsigned char insn_buf[16];
	struct insn insn;
	int insns = 0, c;
	int insns = 0;
	int warnings = 0;

	parse_args(argc, argv);
@@ -27,7 +27,9 @@
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pci.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
@@ -1175,7 +1177,11 @@ asmlinkage void __init xen_start_kernel(void)
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
	} else {
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

	xen_raw_console_write("about to get started...\n");
@@ -35,10 +35,10 @@

cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq);
static DEFINE_PER_CPU(int, callfuncsingle_irq);
static DEFINE_PER_CPU(int, debug_irq) = -1;
static DEFINE_PER_CPU(int, xen_resched_irq);
static DEFINE_PER_CPU(int, xen_callfunc_irq);
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
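Why this and the following hunks rename everything: per-CPU variables occupy a single global symbol namespace, so generic names such as resched_irq risk colliding with identically named per-CPU symbols elsewhere in the kernel; prefixing them with xen_ keeps them unique. A minimal usage sketch of the API involved (the variable name here is illustrative only):

static DEFINE_PER_CPU(int, xen_example_irq) = -1;	/* one slot per CPU */

static void example(unsigned int cpu)
{
	per_cpu(xen_example_irq, cpu) = 0;	/* a named CPU's slot */
	__get_cpu_var(xen_example_irq) = 0;	/* the current CPU's slot */
}
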
@@ -103,7 +103,7 @@ static int xen_smp_intr_init(unsigned int cpu)
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(resched_irq, cpu) = rc;
	per_cpu(xen_resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -114,7 +114,7 @@ static int xen_smp_intr_init(unsigned int cpu)
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu) = rc;
	per_cpu(xen_callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -122,7 +122,7 @@ static int xen_smp_intr_init(unsigned int cpu)
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(debug_irq, cpu) = rc;
	per_cpu(xen_debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -133,19 +133,20 @@ static int xen_smp_intr_init(unsigned int cpu)
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfuncsingle_irq, cpu) = rc;
	per_cpu(xen_callfuncsingle_irq, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	if (per_cpu(callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	if (per_cpu(debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
	if (per_cpu(xen_resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	if (per_cpu(xen_debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
				       NULL);

	return rc;
}

@@ -349,10 +350,10 @@ static void xen_cpu_die(unsigned int cpu)
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);

@@ -120,14 +120,14 @@ struct xen_spinlock {
	unsigned short spinners;	/* count of waiting cpus */
};

static int xen_spin_is_locked(struct raw_spinlock *lock)
static int xen_spin_is_locked(struct arch_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	return xl->lock != 0;
}

static int xen_spin_is_contended(struct raw_spinlock *lock)
static int xen_spin_is_contended(struct arch_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

@@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock)
	return xl->spinners != 0;
}

static int xen_spin_trylock(struct raw_spinlock *lock)
static int xen_spin_trylock(struct arch_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;
@@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
	__get_cpu_var(lock_spinners) = prev;
}

static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	struct xen_spinlock *prev;
@@ -254,7 +254,7 @@ out:
	return ret;
}

static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	unsigned timeout;
@@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
	spin_time_accum_total(start_spin);
}

static void xen_spin_lock(struct raw_spinlock *lock)
static void xen_spin_lock(struct arch_spinlock *lock)
{
	__xen_spin_lock(lock, false);
}

static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
{
	__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
}
@@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
	}
}

static void xen_spin_unlock(struct raw_spinlock *lock)
static void xen_spin_unlock(struct arch_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

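The mechanical raw_spinlock to arch_spinlock substitutions above track the kernel-wide rename of the bottom-level lock type (arch_spinlock_t), which freed the raw_ prefix for the preempt-rt locking work. The Xen code puns its own layout onto that type; a sketch of the structure partially visible in the hunk above, with the lock field assumed from context:

struct xen_spinlock {
	unsigned char lock;		/* 0 -> free, 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};

Every handler therefore still begins by casting the arch_spinlock pointer to struct xen_spinlock *, exactly as before the rename.
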
@@ -31,14 +31,14 @@
#define NS_PER_TICK (1000000000LL / HZ)

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);

/* unused ns of stolen and blocked time */
static DEFINE_PER_CPU(u64, residual_stolen);
static DEFINE_PER_CPU(u64, residual_blocked);
static DEFINE_PER_CPU(u64, xen_residual_stolen);
static DEFINE_PER_CPU(u64, xen_residual_blocked);

/* return a consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
@@ -79,7 +79,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)

	BUG_ON(preemptible());

	state = &__get_cpu_var(runstate);
	state = &__get_cpu_var(xen_runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
@@ -97,14 +97,14 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}

void xen_setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(runstate, cpu);
	area.addr.v = &per_cpu(xen_runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
@@ -122,7 +122,7 @@ static void do_stolen_accounting(void)

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(runstate_snapshot);
	snap = &__get_cpu_var(xen_runstate_snapshot);

	/* work out how much time the VCPU has not been runn*ing* */
	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
@@ -133,24 +133,24 @@ static void do_stolen_accounting(void)

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time. */
	stolen = runnable + offline + __get_cpu_var(residual_stolen);
	stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__get_cpu_var(residual_stolen) = stolen;
	__get_cpu_var(xen_residual_stolen) = stolen;
	account_steal_ticks(ticks);

	/* Add the appropriate number of ticks of blocked time,
	   including any left-overs from last time. */
	blocked += __get_cpu_var(residual_blocked);
	blocked += __get_cpu_var(xen_residual_blocked);

	if (blocked < 0)
		blocked = 0;

	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
	__get_cpu_var(residual_blocked) = blocked;
	__get_cpu_var(xen_residual_blocked) = blocked;
	account_idle_ticks(ticks);
}
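The arithmetic the renamed lines implement, in isolation: stolen and blocked time arrive from the hypervisor as nanoseconds, get converted to whole scheduler ticks, and the sub-tick remainder is carried in the per-CPU residual so nothing is lost between calls. A self-contained sketch of that pattern (the names here are illustrative, not from the patch; the real residuals are per-CPU):

static u64 xen_residual_example;

static unsigned long ns_to_ticks(u64 ns)
{
	u64 rem = ns + xen_residual_example;
	unsigned long ticks = iter_div_u64_rem(rem, NS_PER_TICK, &rem);

	xen_residual_example = rem;	/* carry the sub-tick leftover */
	return ticks;
}
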