Merge commit 'origin/master' into next

Manual merge of:
	arch/powerpc/include/asm/elf.h
	drivers/i2c/busses/i2c-mpc.c
Author: Benjamin Herrenschmidt
Date:   2009-03-30 14:04:53 +11:00
4402 changed files with 317171 additions and 163158 deletions


@@ -55,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->regs.ds = (u16)regs->ds;
dump->regs.es = (u16)regs->es;
dump->regs.fs = (u16)regs->fs;
savesegment(gs, dump->regs.gs);
dump->regs.gs = get_user_gs(regs);
dump->regs.orig_ax = regs->orig_ax;
dump->regs.ip = regs->ip;
dump->regs.cs = (u16)regs->cs;
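/*
 * A hedged note on the change above: savesegment() reads the live
 * %gs selector, whereas get_user_gs() returns the task's saved user
 * %gs (taken from pt_regs when the kernel manages %gs lazily). For a
 * user core dump the saved user value is the right one, since the
 * kernel itself may have repurposed the live register.
 */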


@@ -102,9 +102,6 @@ static inline void disable_acpi(void)
acpi_noirq = 1;
}
/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
#define FIX_ACPI_PAGES 4
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }


@@ -0,0 +1,11 @@
#ifndef ASM_X86_AES_H
#define ASM_X86_AES_H
#include <linux/crypto.h>
#include <crypto/aes.h>
void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
const u8 *src);
void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
const u8 *src);
#endif
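A minimal usage sketch for the two hooks declared above; crypto_aes_expand_key() comes from <crypto/aes.h>, and the function and buffer names here are illustrative, not part of the patch:

static int example_aes_encrypt_block(const u8 *key, unsigned int key_len,
				     const u8 in[AES_BLOCK_SIZE],
				     u8 out[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx ctx;
	int err;

	/* expand the key schedule, then encrypt one 16-byte block */
	err = crypto_aes_expand_key(&ctx, key, key_len);
	if (err)
		return err;

	crypto_aes_encrypt_x86(&ctx, out, in);
	return 0;
}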


@@ -1,15 +1,18 @@
#ifndef _ASM_X86_APIC_H
#define _ASM_X86_APIC_H
#include <linux/pm.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <asm/alternative.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/apicdef.h>
#include <asm/atomic.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/system.h>
#include <asm/msr.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -33,7 +36,13 @@
} while (0)
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
extern void generic_apic_probe(void);
#else
static inline void generic_apic_probe(void)
{
}
#endif
#ifdef CONFIG_X86_LOCAL_APIC
@@ -41,6 +50,21 @@ extern unsigned int apic_verbosity;
extern int local_apic_timer_c2_ok;
extern int disable_apic;
#ifdef CONFIG_SMP
extern void __inquire_remote_apic(int apicid);
#else /* CONFIG_SMP */
static inline void __inquire_remote_apic(int apicid)
{
}
#endif /* CONFIG_SMP */
static inline void default_inquire_remote_apic(int apicid)
{
if (apic_verbosity >= APIC_DEBUG)
__inquire_remote_apic(apicid);
}
/*
* Basic functions accessing APICs.
*/
@@ -51,7 +75,14 @@ extern int disable_apic;
#define setup_secondary_clock setup_secondary_APIC_clock
#endif
#ifdef CONFIG_X86_VSMP
extern int is_vsmp_box(void);
#else
static inline int is_vsmp_box(void)
{
return 0;
}
#endif
extern void xapic_wait_icr_idle(void);
extern u32 safe_xapic_wait_icr_idle(void);
extern void xapic_icr_write(u32, u32);
@@ -71,6 +102,12 @@ static inline u32 native_apic_mem_read(u32 reg)
return *((volatile u32 *)(APIC_BASE + reg));
}
extern void native_apic_wait_icr_idle(void);
extern u32 native_safe_apic_wait_icr_idle(void);
extern void native_apic_icr_write(u32 low, u32 id);
extern u64 native_apic_icr_read(void);
#ifdef CONFIG_X86_X2APIC
static inline void native_apic_msr_write(u32 reg, u32 v)
{
if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
@@ -91,8 +128,32 @@ static inline u32 native_apic_msr_read(u32 reg)
return low;
}
#ifndef CONFIG_X86_32
extern int x2apic;
static inline void native_x2apic_wait_icr_idle(void)
{
/* no need to wait for icr idle in x2apic */
return;
}
static inline u32 native_safe_x2apic_wait_icr_idle(void)
{
/* no need to wait for icr idle in x2apic */
return 0;
}
static inline void native_x2apic_icr_write(u32 low, u32 id)
{
wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}
static inline u64 native_x2apic_icr_read(void)
{
unsigned long val;
rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
return val;
}
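/*
 * Worked example of the MSR math above (values from apicdef.h):
 * APIC_BASE_MSR is 0x800 and APIC_ICR is 0x300, so
 * APIC_BASE_MSR + (APIC_ICR >> 4) == 0x800 + 0x30 == MSR 0x830, the
 * architectural x2APIC ICR. One 64-bit wrmsrl() thus replaces the
 * two 32-bit MMIO writes the xAPIC needs.
 */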
extern int x2apic, x2apic_phys;
extern void check_x2apic(void);
extern void enable_x2apic(void);
extern void enable_IR_x2apic(void);
@@ -110,30 +171,24 @@ static inline int x2apic_enabled(void)
return 0;
}
#else
#define x2apic_enabled() 0
static inline void check_x2apic(void)
{
}
static inline void enable_x2apic(void)
{
}
static inline void enable_IR_x2apic(void)
{
}
static inline int x2apic_enabled(void)
{
return 0;
}
#endif
struct apic_ops {
u32 (*read)(u32 reg);
void (*write)(u32 reg, u32 v);
u64 (*icr_read)(void);
void (*icr_write)(u32 low, u32 high);
void (*wait_icr_idle)(void);
u32 (*safe_wait_icr_idle)(void);
};
extern struct apic_ops *apic_ops;
#define apic_read (apic_ops->read)
#define apic_write (apic_ops->write)
#define apic_icr_read (apic_ops->icr_read)
#define apic_icr_write (apic_ops->icr_write)
#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)
extern int get_physical_broadcast(void);
#ifdef CONFIG_X86_64
#ifdef CONFIG_X86_X2APIC
static inline void ack_x2APIC_irq(void)
{
/* Docs say use 0 for future compatibility */
@@ -141,18 +196,6 @@ static inline void ack_x2APIC_irq(void)
}
#endif
static inline void ack_APIC_irq(void)
{
/*
* ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
*/
/* Docs say use 0 for future compatibility */
apic_write(APIC_EOI, 0);
}
extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void connect_bsp_APIC(void);
@@ -196,4 +239,327 @@ static inline void disable_local_APIC(void) { }
#endif /* !CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_64
#define SET_APIC_ID(x) (apic->set_apic_id(x))
#else
#endif
/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
*
* Generic APIC sub-arch data struct.
*
* Hacked for x86-64 by James Cleverdon from i386 architecture code by
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
struct apic {
char *name;
int (*probe)(void);
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
int (*apic_id_registered)(void);
u32 irq_delivery_mode;
u32 irq_dest_mode;
const struct cpumask *(*target_cpus)(void);
int disable_esr;
int dest_logical;
unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
unsigned long (*check_apicid_present)(int apicid);
void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
void (*init_apic_ldr)(void);
physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
void (*setup_apic_routing)(void);
int (*multi_timer_check)(int apic, int irq);
int (*apicid_to_node)(int logical_apicid);
int (*cpu_to_logical_apicid)(int cpu);
int (*cpu_present_to_apicid)(int mps_cpu);
physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
void (*setup_portio_remap)(void);
int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
void (*enable_apic_mode)(void);
int (*phys_pkg_id)(int cpuid_apic, int index_msb);
/*
* When one of the next two hooks returns 1 the apic
* is switched to this. Essentially they are additional
* probe functions:
*/
int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
unsigned int (*get_apic_id)(unsigned long x);
unsigned long (*set_apic_id)(unsigned int id);
unsigned long apic_id_mask;
unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask);
/* ipi */
void (*send_IPI_mask)(const struct cpumask *mask, int vector);
void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
void (*send_IPI_self)(int vector);
/* wakeup_secondary_cpu */
int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
int trampoline_phys_low;
int trampoline_phys_high;
void (*wait_for_init_deassert)(atomic_t *deassert);
void (*smp_callin_clear_local_apic)(void);
void (*inquire_remote_apic)(int apicid);
/* apic ops */
u32 (*read)(u32 reg);
void (*write)(u32 reg, u32 v);
u64 (*icr_read)(void);
void (*icr_write)(u32 low, u32 high);
void (*wait_icr_idle)(void);
u32 (*safe_wait_icr_idle)(void);
};
/*
* Pointer to the local APIC driver in use on this system (there's
* always just one such driver in use - the kernel decides via an
* early probing process which one it picks - and then sticks to it):
*/
extern struct apic *apic;
/*
* APIC functionality to boot other CPUs - only used on SMP:
*/
#ifdef CONFIG_SMP
extern atomic_t init_deasserted;
extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
#endif
static inline u32 apic_read(u32 reg)
{
return apic->read(reg);
}
static inline void apic_write(u32 reg, u32 val)
{
apic->write(reg, val);
}
static inline u64 apic_icr_read(void)
{
return apic->icr_read();
}
static inline void apic_icr_write(u32 low, u32 high)
{
apic->icr_write(low, high);
}
static inline void apic_wait_icr_idle(void)
{
apic->wait_icr_idle();
}
static inline u32 safe_apic_wait_icr_idle(void)
{
return apic->safe_wait_icr_idle();
}
static inline void ack_APIC_irq(void)
{
/*
* ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
*/
/* Docs say use 0 for future compatibility */
apic_write(APIC_EOI, 0);
}
static inline unsigned default_get_apic_id(unsigned long x)
{
unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
if (APIC_XAPIC(ver))
return (x >> 24) & 0xFF;
else
return (x >> 24) & 0x0F;
}
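/*
 * Worked example of the decode above: the APIC ID register keeps the
 * ID in bits 31:24, so a raw read of 0x05000000 yields ID 5. On
 * xAPIC all eight of those bits are valid; older 82489DX-style APICs
 * only implement the low four, hence the 0x0F mask.
 */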
/*
* Warm reset vector default position:
*/
#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467
#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469
#ifdef CONFIG_X86_64
extern struct apic apic_flat;
extern struct apic apic_physflat;
extern struct apic apic_x2apic_cluster;
extern struct apic apic_x2apic_phys;
extern int default_acpi_madt_oem_check(char *, char *);
extern void apic_send_IPI_self(int vector);
extern struct apic apic_x2apic_uv_x;
DECLARE_PER_CPU(int, x2apic_extra_bits);
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
#endif
static inline void default_wait_for_init_deassert(atomic_t *deassert)
{
while (!atomic_read(deassert))
cpu_relax();
return;
}
extern void generic_bigsmp_probe(void);
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/smp.h>
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
static inline const struct cpumask *default_target_cpus(void)
{
#ifdef CONFIG_SMP
return cpu_online_mask;
#else
return cpumask_of(0);
#endif
}
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
static inline unsigned int read_apic_id(void)
{
unsigned int reg;
reg = apic_read(APIC_ID);
return apic->get_apic_id(reg);
}
extern void default_setup_apic_routing(void);
#ifdef CONFIG_X86_32
/*
* Set up the logical destination ID.
*
* Intel recommends to set DFR, LDR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
extern void default_init_apic_ldr(void);
static inline int default_apic_id_registered(void)
{
return physid_isset(read_apic_id(), phys_cpu_present_map);
}
static inline unsigned int
default_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return cpumask_bits(cpumask)[0];
}
static inline unsigned int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{
unsigned long mask1 = cpumask_bits(cpumask)[0];
unsigned long mask2 = cpumask_bits(andmask)[0];
unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
return (unsigned int)(mask1 & mask2 & mask3);
}
static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}
extern int default_apicid_to_node(int logical_apicid);
#endif
static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
{
return physid_isset(apicid, bitmap);
}
static inline unsigned long default_check_apicid_present(int bit)
{
return physid_isset(bit, phys_cpu_present_map);
}
static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
{
return phys_map;
}
/* Mapping from cpu number to logical apicid */
static inline int default_cpu_to_logical_apicid(int cpu)
{
return 1 << cpu;
}
static inline int __default_cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
else
return BAD_APICID;
}
static inline int
__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
}
#ifdef CONFIG_X86_32
static inline int default_cpu_present_to_apicid(int mps_cpu)
{
return __default_cpu_present_to_apicid(mps_cpu);
}
static inline int
default_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
}
#else
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
#endif
static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
{
return physid_mask_of_physid(phys_apicid);
}
#endif /* CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_32
extern u8 cpu_2_logical_apicid[NR_CPUS];
#endif
#endif /* _ASM_X86_APIC_H */
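As a hedged sketch of how the struct apic above gets populated (a small field subset; the instance name is illustrative and not from this patch), a driver for plain memory-mapped xAPIC hardware would wire the low-level accessors roughly like this:

static struct apic apic_example = {
	.name			= "example",
	/* MMIO accessors for classic xAPIC hardware: */
	.read			= native_apic_mem_read,
	.write			= native_apic_mem_write,
	.icr_read		= native_apic_icr_read,
	.icr_write		= native_apic_icr_write,
	.wait_icr_idle		= native_apic_wait_icr_idle,
	.safe_wait_icr_idle	= native_safe_apic_wait_icr_idle,
};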


@@ -0,0 +1,12 @@
#ifndef _ASM_X86_APICNUM_H
#define _ASM_X86_APICNUM_H
/* define MAX_IO_APICS */
#ifdef CONFIG_X86_32
# define MAX_IO_APICS 64
#else
# define MAX_IO_APICS 128
# define MAX_LOCAL_APIC 32768
#endif
#endif /* _ASM_X86_APICNUM_H */


@@ -1,26 +0,0 @@
#ifndef _ASM_X86_ARCH_HOOKS_H
#define _ASM_X86_ARCH_HOOKS_H
#include <linux/interrupt.h>
/*
* linux/include/asm/arch_hooks.h
*
* define the architecture specific hooks
*/
/* these aren't arch hooks, they are generic routines
* that can be used by the hooks */
extern void init_ISA_irqs(void);
extern irqreturn_t timer_interrupt(int irq, void *dev_id);
/* these are the defined hooks */
extern void intr_init_hook(void);
extern void pre_intr_init_hook(void);
extern void pre_setup_arch_hook(void);
extern void trap_init_hook(void);
extern void pre_time_init_hook(void);
extern void time_init_hook(void);
extern void mca_nmi_hook(void);
#endif /* _ASM_X86_ARCH_HOOKS_H */


@@ -1,155 +0,0 @@
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H
#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
#define esr_disable (1)
static inline int apic_id_registered(void)
{
return (1);
}
static inline const cpumask_t *target_cpus(void)
{
#ifdef CONFIG_SMP
return &cpu_online_map;
#else
return &cpumask_of_cpu(0);
#endif
}
#undef APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL 0
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
#define INT_DELIVERY_MODE (dest_Fixed)
#define INT_DEST_MODE (0) /* phys delivery to target proc */
#define NO_BALANCE_IRQ (0)
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
return (0);
}
static inline unsigned long check_apicid_present(int bit)
{
return (1);
}
static inline unsigned long calculate_ldr(int cpu)
{
unsigned long val, id;
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
id = xapic_phys_to_log_apicid(cpu);
val |= SET_APIC_LOGICAL_ID(id);
return val;
}
/*
* Set up the logical destination ID.
*
* Intel recommends to set DFR, LDR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
static inline void init_apic_ldr(void)
{
unsigned long val;
int cpu = smp_processor_id();
apic_write(APIC_DFR, APIC_DFR_VALUE);
val = calculate_ldr(cpu);
apic_write(APIC_LDR, val);
}
static inline void setup_apic_routing(void)
{
printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
"Physflat", nr_ioapics);
}
static inline int multi_timer_check(int apic, int irq)
{
return (0);
}
static inline int apicid_to_node(int logical_apicid)
{
return apicid_2_node[hard_smp_processor_id()];
}
static inline int cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids)
return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
return BAD_APICID;
}
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
return physid_mask_of_physid(phys_apicid);
}
extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
if (cpu >= nr_cpu_ids)
return BAD_APICID;
return cpu_physical_id(cpu);
}
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
/* For clustered we don't have a good way to do this yet - hack */
return physids_promote(0xFFL);
}
static inline void setup_portio_remap(void)
{
}
static inline void enable_apic_mode(void)
{
}
static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
return (1);
}
/* As we are using single CPU as destination, pick only one CPU here */
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
int cpu;
int apicid;
cpu = first_cpu(*cpumask);
apicid = cpu_to_logical_apicid(cpu);
return apicid;
}
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{
int cpu;
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
for_each_cpu_and(cpu, cpumask, andmask)
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
if (cpu < nr_cpu_ids)
return cpu_to_logical_apicid(cpu);
return BAD_APICID;
}
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}
#endif /* __ASM_MACH_APIC_H */


@@ -1,13 +0,0 @@
#ifndef __ASM_MACH_APICDEF_H
#define __ASM_MACH_APICDEF_H
#define APIC_ID_MASK (0xFF<<24)
static inline unsigned get_apic_id(unsigned long x)
{
return (((x)>>24)&0xFF);
}
#define GET_APIC_ID(x) get_apic_id(x)
#endif


@@ -1,22 +0,0 @@
#ifndef __ASM_MACH_IPI_H
#define __ASM_MACH_IPI_H
void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
send_IPI_mask_sequence(mask, vector);
}
static inline void send_IPI_allbutself(int vector)
{
send_IPI_mask_allbutself(cpu_online_mask, vector);
}
static inline void send_IPI_all(int vector)
{
send_IPI_mask(cpu_online_mask, vector);
}
#endif /* __ASM_MACH_IPI_H */


@@ -10,17 +10,31 @@
#define EXTENDED_VGA 0xfffe /* 80x50 mode */
#define ASK_VGA 0xfffd /* ask for it at bootup */
#ifdef __KERNEL__
/* Physical address where kernel should be loaded. */
#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ (CONFIG_PHYSICAL_ALIGN - 1)) \
& ~(CONFIG_PHYSICAL_ALIGN - 1))
#ifdef CONFIG_KERNEL_BZIP2
#define BOOT_HEAP_SIZE 0x400000
#else /* !CONFIG_KERNEL_BZIP2 */
#ifdef CONFIG_X86_64
#define BOOT_HEAP_SIZE 0x7000
#define BOOT_STACK_SIZE 0x4000
#else
#define BOOT_HEAP_SIZE 0x4000
#endif
#endif /* !CONFIG_KERNEL_BZIP2 */
#ifdef CONFIG_X86_64
#define BOOT_STACK_SIZE 0x4000
#else
#define BOOT_STACK_SIZE 0x1000
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_X86_BOOT_H */
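For scale, the defines above work out to a 4 MiB (0x400000) decompressor heap for a bzip2-compressed kernel, versus 28 KiB (0x7000) on 64-bit and 16 KiB (0x4000) on 32-bit otherwise; the boot stack is 16 KiB (0x4000) on 64-bit and 4 KiB (0x1000) on 32-bit.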


@@ -5,24 +5,43 @@
#include <linux/mm.h>
/* Caches aren't brain-dead on the intel. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
static inline void flush_cache_all(void) { }
static inline void flush_cache_mm(struct mm_struct *mm) { }
static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end) { }
static inline void flush_cache_page(struct vm_area_struct *vma,
unsigned long vmaddr, unsigned long pfn) { }
static inline void flush_dcache_page(struct page *page) { }
static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
static inline void flush_icache_range(unsigned long start,
unsigned long end) { }
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page) { }
static inline void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page,
unsigned long addr,
unsigned long len) { }
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
static inline void flush_cache_vunmap(unsigned long start,
unsigned long end) { }
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
memcpy((dst), (src), (len))
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy((dst), (src), (len))
static inline void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr,
void *dst, const void *src,
unsigned long len)
{
memcpy(dst, src, len);
}
static inline void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr,
void *dst, const void *src,
unsigned long len)
{
memcpy(dst, src, len);
}
#define PG_non_WB PG_arch_1
PAGEFLAG(NonWB, non_WB)
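One practical effect of converting the empty macros above into empty inline functions is that argument types are now checked at compile time; an illustrative example (not from the patch):

flush_dcache_page(page);	/* ok: page is a struct page * */
flush_dcache_page(1234);	/* inline version: compile-time diagnostic */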


@@ -1,5 +1,55 @@
/*
* Some macros to handle stack frames in assembly.
x86 function call convention, 64-bit:
-------------------------------------
arguments | callee-saved | extra caller-saved | return
[callee-clobbered] | | [callee-clobbered] |
---------------------------------------------------------------------------
rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11 | rax, rdx [**]
( rsp is obviously invariant across normal function calls. (gcc can 'merge'
functions when it sees tail-call optimization possibilities) rflags is
clobbered. Leftover arguments are passed over the stack frame.)
[*] In the frame-pointers case rbp is fixed to the stack frame.
[**] for struct return values wider than 64 bits the return convention is a
bit more complex: up to 128 bits width we return small structures
straight in rax, rdx. For structures larger than that (3 words or
larger) the caller puts a pointer to an on-stack return struct
[allocated in the caller's stack frame] into the first argument - i.e.
into rdi. All other arguments shift up by one in this case.
Fortunately this case is rare in the kernel.
For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:
x86 function calling convention, 32-bit:
----------------------------------------
arguments | callee-saved | extra caller-saved | return
[callee-clobbered] | | [callee-clobbered] |
-------------------------------------------------------------------------
eax edx ecx | ebx edi esi ebp [*] | <none> | eax, edx [**]
( here too esp is obviously invariant across normal function calls. eflags
is clobbered. Leftover arguments are passed over the stack frame. )
[*] In the frame-pointers case ebp is fixed to the stack frame.
[**] We build with -freg-struct-return, which on 32-bit means similar
semantics as on 64-bit: edx can be used for a second return value
(i.e. covering integer and structure sizes up to 64 bits) - after that
it gets more complex and more expensive: 3-word or larger struct returns
get done in the caller's frame and the pointer to the return struct goes
into regparm0, i.e. eax - the other arguments shift up and the
function's register parameters degenerate to regparm=2 in essence.
*/
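/*
 * An illustrative example of the 64-bit convention above (not part of
 * the original file): for
 *	long f(long a, long b, long c);
 * the arguments arrive in %rdi, %rsi, %rdx and the result returns in
 * %rax; %rbx, %rbp and %r12-%r15 must be preserved by f, while
 * %r10/%r11 may be clobbered. Under the 32-bit -mregparm=3 kernel
 * convention the same arguments arrive in %eax, %edx, %ecx.
 */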
/*
* 64-bit system call stack frame layout defines and helpers,
* for assembly code:
*/
#define R15 0
@@ -9,7 +59,7 @@
#define RBP 32
#define RBX 40
/* arguments: interrupts/non tracing syscalls only save upto here*/
/* arguments: interrupts/non tracing syscalls only save up to here: */
#define R11 48
#define R10 56
#define R9 64
@@ -22,7 +72,7 @@
#define ORIG_RAX 120 /* + error_code */
/* end of arguments */
/* cpu exception frame or undefined in case of fast syscall. */
/* cpu exception frame or undefined in case of fast syscall: */
#define RIP 128
#define CS 136
#define EFLAGS 144


@@ -7,6 +7,20 @@
#include <linux/nodemask.h>
#include <linux/percpu.h>
#ifdef CONFIG_SMP
extern void prefill_possible_map(void);
#else /* CONFIG_SMP */
static inline void prefill_possible_map(void) {}
#define cpu_physical_id(cpu) boot_cpu_physical_apicid
#define safe_smp_processor_id() 0
#define stack_smp_processor_id() 0
#endif /* CONFIG_SMP */
struct x86_cpu {
struct cpu cpu;
};
@@ -17,4 +31,7 @@ extern void arch_unregister_cpu(int);
#endif
DECLARE_PER_CPU(int, cpu_state);
extern unsigned int boot_cpu_id;
#endif /* _ASM_X86_CPU_H */


@@ -213,6 +213,7 @@ extern const char * const x86_power_flags[32];
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP)
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)


@@ -0,0 +1,32 @@
#ifndef _ASM_X86_CPUMASK_H
#define _ASM_X86_CPUMASK_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#ifdef CONFIG_X86_64
extern cpumask_var_t cpu_callin_mask;
extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
extern cpumask_var_t cpu_sibling_setup_mask;
extern void setup_cpu_local_masks(void);
#else /* CONFIG_X86_32 */
extern cpumask_t cpu_callin_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;
extern cpumask_t cpu_sibling_setup_map;
#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
static inline void setup_cpu_local_masks(void) { }
#endif /* CONFIG_X86_32 */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CPUMASK_H */
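On both configurations the masks are consumed through the struct cpumask API; a small usage sketch (the loop body is illustrative):

int cpu;

for_each_cpu(cpu, cpu_callin_mask)
	pr_debug("CPU %d has called in\n", cpu);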


@@ -1,39 +1,21 @@
#ifndef _ASM_X86_CURRENT_H
#define _ASM_X86_CURRENT_H
#ifdef CONFIG_X86_32
#include <linux/compiler.h>
#include <asm/percpu.h>
#ifndef __ASSEMBLY__
struct task_struct;
DECLARE_PER_CPU(struct task_struct *, current_task);
static __always_inline struct task_struct *get_current(void)
{
return x86_read_percpu(current_task);
}
#else /* X86_32 */
#ifndef __ASSEMBLY__
#include <asm/pda.h>
struct task_struct;
static __always_inline struct task_struct *get_current(void)
{
return read_pda(pcurrent);
return percpu_read(current_task);
}
#else /* __ASSEMBLY__ */
#include <asm/asm-offsets.h>
#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
#endif /* __ASSEMBLY__ */
#endif /* X86_32 */
#define current get_current()
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CURRENT_H */
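The unification leaves usage untouched; on both 32- and 64-bit, current now expands to percpu_read(current_task). An illustrative example:

if (current->mm)
	printk(KERN_DEBUG "%s[%d]\n", current->comm, current->pid);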


@@ -112,7 +112,7 @@ extern unsigned int vdso_enabled;
* now struct_user_regs, they are different)
*/
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs) \
do { \
pr_reg[0] = regs->bx; \
pr_reg[1] = regs->cx; \
@@ -124,7 +124,6 @@ do { \
pr_reg[7] = regs->ds & 0xffff; \
pr_reg[8] = regs->es & 0xffff; \
pr_reg[9] = regs->fs & 0xffff; \
savesegment(gs, pr_reg[10]); \
pr_reg[11] = regs->orig_ax; \
pr_reg[12] = regs->ip; \
pr_reg[13] = regs->cs & 0xffff; \
@@ -133,6 +132,18 @@ do { \
pr_reg[16] = regs->ss & 0xffff; \
} while (0);
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
pr_reg[10] = get_user_gs(regs); \
} while (0);
#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs) \
do { \
ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
savesegment(gs, pr_reg[10]); \
} while (0);
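(Same distinction as in the aout_dump_thread hunk earlier: user-mode dumps want the task's saved user %gs via get_user_gs(), while kernel dumps want the live selector via savesegment().)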
#define ELF_PLATFORM (utsname()->machine)
#define set_personality_64bit() do { } while (0)


@@ -9,12 +9,28 @@
* is no hardware IRQ pin equivalent for them, they are triggered
* through the ICC by us (IPIs)
*/
#ifdef CONFIG_X86_SMP
#ifdef CONFIG_SMP
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
smp_invalidate_interrupt)
#endif
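/*
 * As used here, BUILD_INTERRUPT3(name, vector, handler) emits an
 * entry stub "name" for "vector" that calls the given C handler, so
 * the eight INVALIDATE_TLB_VECTOR_START+0..7 shootdown vectors each
 * get their own entry point while sharing smp_invalidate_interrupt().
 */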
/*
@@ -25,10 +41,15 @@ BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
* a much simpler SMP time architecture:
*/
#ifdef CONFIG_X86_LOCAL_APIC
BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
#ifdef CONFIG_PERF_COUNTERS
BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR)
#endif
#ifdef CONFIG_X86_MCE_P4THERMAL
BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
#endif


@@ -1,242 +0,0 @@
#ifndef __ASM_ES7000_APIC_H
#define __ASM_ES7000_APIC_H
#include <linux/gfp.h>
#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
#define esr_disable (1)
static inline int apic_id_registered(void)
{
return (1);
}
static inline const cpumask_t *target_cpus_cluster(void)
{
return &CPU_MASK_ALL;
}
static inline const cpumask_t *target_cpus(void)
{
return &cpumask_of_cpu(smp_processor_id());
}
#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
#define NO_BALANCE_IRQ_CLUSTER (1)
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
#define INT_DELIVERY_MODE (dest_Fixed)
#define INT_DEST_MODE (0) /* phys delivery to target procs */
#define NO_BALANCE_IRQ (0)
#undef APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL 0x0
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
return 0;
}
static inline unsigned long check_apicid_present(int bit)
{
return physid_isset(bit, phys_cpu_present_map);
}
#define apicid_cluster(apicid) (apicid & 0xF0)
static inline unsigned long calculate_ldr(int cpu)
{
unsigned long id;
id = xapic_phys_to_log_apicid(cpu);
return (SET_APIC_LOGICAL_ID(id));
}
/*
* Set up the logical destination ID.
*
* Intel recommends to set DFR, LdR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
static inline void init_apic_ldr_cluster(void)
{
unsigned long val;
int cpu = smp_processor_id();
apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
val = calculate_ldr(cpu);
apic_write(APIC_LDR, val);
}
static inline void init_apic_ldr(void)
{
unsigned long val;
int cpu = smp_processor_id();
apic_write(APIC_DFR, APIC_DFR_VALUE);
val = calculate_ldr(cpu);
apic_write(APIC_LDR, val);
}
extern int apic_version [MAX_APICS];
static inline void setup_apic_routing(void)
{
int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
(apic_version[apic] == 0x14) ?
"Physical Cluster" : "Logical Cluster",
nr_ioapics, cpus_addr(*target_cpus())[0]);
}
static inline int multi_timer_check(int apic, int irq)
{
return 0;
}
static inline int apicid_to_node(int logical_apicid)
{
return 0;
}
static inline int cpu_present_to_apicid(int mps_cpu)
{
if (!mps_cpu)
return boot_cpu_physical_apicid;
else if (mps_cpu < nr_cpu_ids)
return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
else
return BAD_APICID;
}
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
static int id = 0;
physid_mask_t mask;
mask = physid_mask_of_physid(id);
++id;
return mask;
}
extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
if (cpu >= nr_cpu_ids)
return BAD_APICID;
return (int)cpu_2_logical_apicid[cpu];
#else
return logical_smp_processor_id();
#endif
}
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
/* For clustered we don't have a good way to do this yet - hack */
return physids_promote(0xff);
}
static inline void setup_portio_remap(void)
{
}
extern unsigned int boot_cpu_physical_apicid;
static inline int check_phys_apicid_present(int cpu_physical_apicid)
{
boot_cpu_physical_apicid = read_apic_id();
return (1);
}
static inline unsigned int
cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
{
int num_bits_set;
int cpus_found = 0;
int cpu;
int apicid;
num_bits_set = cpumask_weight(cpumask);
/* Return id to all */
if (num_bits_set == nr_cpu_ids)
return 0xFF;
/*
* The cpus in the mask must all be on the same apicid cluster. If
* they are not, return the default value of TARGET_CPUS.
*/
cpu = cpumask_first(cpumask);
apicid = cpu_to_logical_apicid(cpu);
while (cpus_found < num_bits_set) {
if (cpumask_test_cpu(cpu, cpumask)) {
int new_apicid = cpu_to_logical_apicid(cpu);
if (apicid_cluster(apicid) !=
apicid_cluster(new_apicid)){
printk ("%s: Not a valid mask!\n", __func__);
return 0xFF;
}
apicid = new_apicid;
cpus_found++;
}
cpu++;
}
return apicid;
}
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
int num_bits_set;
int cpus_found = 0;
int cpu;
int apicid;
num_bits_set = cpus_weight(*cpumask);
/* Return id to all */
if (num_bits_set == nr_cpu_ids)
return cpu_to_logical_apicid(0);
/*
* The cpus in the mask must all be on the same apicid cluster. If
* they are not, return the default value of TARGET_CPUS.
*/
cpu = first_cpu(*cpumask);
apicid = cpu_to_logical_apicid(cpu);
while (cpus_found < num_bits_set) {
if (cpu_isset(cpu, *cpumask)) {
int new_apicid = cpu_to_logical_apicid(cpu);
if (apicid_cluster(apicid) !=
apicid_cluster(new_apicid)){
printk ("%s: Not a valid mask!\n", __func__);
return cpu_to_logical_apicid(0);
}
apicid = new_apicid;
cpus_found++;
}
cpu++;
}
return apicid;
}
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
const struct cpumask *andmask)
{
int apicid = cpu_to_logical_apicid(0);
cpumask_var_t cpumask;
if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
return apicid;
cpumask_and(cpumask, inmask, andmask);
cpumask_and(cpumask, cpumask, cpu_online_mask);
apicid = cpu_mask_to_apicid(cpumask);
free_cpumask_var(cpumask);
return apicid;
}
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}
#endif /* __ASM_ES7000_APIC_H */


@@ -1,13 +0,0 @@
#ifndef __ASM_ES7000_APICDEF_H
#define __ASM_ES7000_APICDEF_H
#define APIC_ID_MASK (0xFF<<24)
static inline unsigned get_apic_id(unsigned long x)
{
return (((x)>>24)&0xFF);
}
#define GET_APIC_ID(x) get_apic_id(x)
#endif


@@ -1,22 +0,0 @@
#ifndef __ASM_ES7000_IPI_H
#define __ASM_ES7000_IPI_H
void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
send_IPI_mask_sequence(mask, vector);
}
static inline void send_IPI_allbutself(int vector)
{
send_IPI_mask_allbutself(cpu_online_mask, vector);
}
static inline void send_IPI_all(int vector)
{
send_IPI_mask(cpu_online_mask, vector);
}
#endif /* __ASM_ES7000_IPI_H */


@@ -1,29 +0,0 @@
#ifndef __ASM_ES7000_MPPARSE_H
#define __ASM_ES7000_MPPARSE_H
#include <linux/acpi.h>
extern int parse_unisys_oem (char *oemptr);
extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
extern void setup_unisys(void);
#ifndef CONFIG_X86_GENERICARCH
extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
#endif
#ifdef CONFIG_ACPI
static inline int es7000_check_dsdt(void)
{
struct acpi_table_header header;
if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
!strncmp(header.oem_id, "UNISYS", 6))
return 1;
return 0;
}
#endif
#endif /* __ASM_MACH_MPPARSE_H */


@@ -1,37 +0,0 @@
#ifndef __ASM_ES7000_WAKECPU_H
#define __ASM_ES7000_WAKECPU_H
#define TRAMPOLINE_PHYS_LOW 0x467
#define TRAMPOLINE_PHYS_HIGH 0x469
static inline void wait_for_init_deassert(atomic_t *deassert)
{
#ifndef CONFIG_ES7000_CLUSTERED_APIC
while (!atomic_read(deassert))
cpu_relax();
#endif
return;
}
/* Nothing to do for most platforms, since cleared by the INIT cycle */
static inline void smp_callin_clear_local_apic(void)
{
}
static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
{
}
static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
{
}
extern void __inquire_remote_apic(int apicid);
static inline void inquire_remote_apic(int apicid)
{
if (apic_verbosity >= APIC_DEBUG)
__inquire_remote_apic(apicid);
}
#endif /* __ASM_MACH_WAKECPU_H */


@@ -1,12 +1,146 @@
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
* x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
*/
#ifndef _ASM_X86_FIXMAP_H
#define _ASM_X86_FIXMAP_H
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#ifdef CONFIG_X86_32
# include "fixmap_32.h"
#include <linux/threads.h>
#include <asm/kmap_types.h>
#else
# include "fixmap_64.h"
#include <asm/vsyscall.h>
#endif
/*
* We can't declare FIXADDR_TOP as a variable for x86_64 because vsyscall
* uses fixmaps that rely on FIXADDR_TOP for proper address calculation.
* Because of this, FIXADDR_TOP x86 integration was left as later work.
*/
#ifdef CONFIG_X86_32
/* used by vmalloc.c, vsyscall.lds.S.
*
* Leave one empty page between vmalloc'ed areas and
* the start of the fixmap.
*/
extern unsigned long __FIXADDR_TOP;
#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
#else
#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
#endif
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process.
* for x86_32: We allocate these special addresses
* from the end of virtual memory (0xfffff000) backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* These 'compile-time allocated' memory buffers are
* fixed-size 4k pages (or larger if used with an increment
* higher than 1). Use set_fixmap(idx,phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
enum fixed_addresses {
#ifdef CONFIG_X86_32
FIX_HOLE,
FIX_VDSO,
#else
VSYSCALL_LAST_PAGE,
VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
+ ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
VSYSCALL_HPET,
#endif
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_X86_LOCAL_APIC
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
#endif
#ifdef CONFIG_X86_IO_APIC
FIX_IO_APIC_BASE_0,
FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
#endif
#ifdef CONFIG_X86_VISWS_APIC
FIX_CO_CPU, /* Cobalt timer */
FIX_CO_APIC, /* Cobalt APIC Redirection Table */
FIX_LI_PCIA, /* Lithium PCI Bridge A */
FIX_LI_PCIB, /* Lithium PCI Bridge B */
#endif
#ifdef CONFIG_X86_F00F_BUG
FIX_F00F_IDT, /* Virtual mapping for IDT */
#endif
#ifdef CONFIG_X86_CYCLONE_TIMER
FIX_CYCLONE_TIMER, /*cyclone timer register*/
#endif
#ifdef CONFIG_X86_32
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#ifdef CONFIG_PCI_MMCONFIG
FIX_PCIE_MCFG,
#endif
#endif
#ifdef CONFIG_PARAVIRT
FIX_PARAVIRT_BOOTMAP,
#endif
__end_of_permanent_fixed_addresses,
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
FIX_OHCI1394_BASE,
#endif
/*
* 256 temporary boot-time mappings, used by early_ioremap(),
* before ioremap() is functional.
*
* We round it up to the next 256 pages boundary so that we
* can have a single pgd entry and a single pte table:
*/
#define NR_FIX_BTMAPS 64
#define FIX_BTMAPS_SLOTS 4
FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
(__end_of_permanent_fixed_addresses & 255),
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
#ifdef CONFIG_X86_32
FIX_WP_TEST,
#endif
__end_of_fixed_addresses
};
extern void reserve_top_address(unsigned long reserve);
#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE)
extern int fixmaps_set;
extern pte_t *kmap_pte;
@@ -69,4 +203,5 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_FIXMAP_H */
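A hedged usage sketch of the fixmap API described above, modeled on how the local APIC base gets mapped (the variable name is illustrative):

unsigned long apic_base;

set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
apic_base = fix_to_virt(FIX_APIC_BASE);	/* a compile-time constant */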


@@ -1,119 +0,0 @@
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
#ifndef _ASM_X86_FIXMAP_32_H
#define _ASM_X86_FIXMAP_32_H
/* used by vmalloc.c, vsyscall.lds.S.
*
* Leave one empty page between vmalloc'ed areas and
* the start of the fixmap.
*/
extern unsigned long __FIXADDR_TOP;
#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <linux/threads.h>
#include <asm/kmap_types.h>
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the end of virtual memory (0xfffff000) backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* these 'compile-time allocated' memory buffers are
* fixed-size 4k pages. (or larger if used with an increment
* higher than 1) use fixmap_set(idx,phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
enum fixed_addresses {
FIX_HOLE,
FIX_VDSO,
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_X86_LOCAL_APIC
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
#endif
#ifdef CONFIG_X86_IO_APIC
FIX_IO_APIC_BASE_0,
FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
#endif
#ifdef CONFIG_X86_VISWS_APIC
FIX_CO_CPU, /* Cobalt timer */
FIX_CO_APIC, /* Cobalt APIC Redirection Table */
FIX_LI_PCIA, /* Lithium PCI Bridge A */
FIX_LI_PCIB, /* Lithium PCI Bridge B */
#endif
#ifdef CONFIG_X86_F00F_BUG
FIX_F00F_IDT, /* Virtual mapping for IDT */
#endif
#ifdef CONFIG_X86_CYCLONE_TIMER
FIX_CYCLONE_TIMER, /*cyclone timer register*/
#endif
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#ifdef CONFIG_PCI_MMCONFIG
FIX_PCIE_MCFG,
#endif
#ifdef CONFIG_PARAVIRT
FIX_PARAVIRT_BOOTMAP,
#endif
__end_of_permanent_fixed_addresses,
/*
* 256 temporary boot-time mappings, used by early_ioremap(),
* before ioremap() is functional.
*
* We round it up to the next 256 pages boundary so that we
* can have a single pgd entry and a single pte table:
*/
#define NR_FIX_BTMAPS 64
#define FIX_BTMAPS_SLOTS 4
FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
(__end_of_permanent_fixed_addresses & 255),
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
FIX_WP_TEST,
#ifdef CONFIG_ACPI
FIX_ACPI_BEGIN,
FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
#endif
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
FIX_OHCI1394_BASE,
#endif
__end_of_fixed_addresses
};
extern void reserve_top_address(unsigned long reserve);
#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_FIXMAP_32_H */


@@ -1,79 +0,0 @@
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*/
#ifndef _ASM_X86_FIXMAP_64_H
#define _ASM_X86_FIXMAP_64_H
#include <linux/kernel.h>
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <asm/vsyscall.h>
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process.
*
* These 'compile-time allocated' memory buffers are
* fixed-size 4k pages (or larger if used with an increment
* higher than 1). Use set_fixmap(idx,phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
enum fixed_addresses {
VSYSCALL_LAST_PAGE,
VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
+ ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
VSYSCALL_HPET,
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
FIX_IO_APIC_BASE_0,
FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
#ifdef CONFIG_PARAVIRT
FIX_PARAVIRT_BOOTMAP,
#endif
__end_of_permanent_fixed_addresses,
#ifdef CONFIG_ACPI
FIX_ACPI_BEGIN,
FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
#endif
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
FIX_OHCI1394_BASE,
#endif
/*
* 256 temporary boot-time mappings, used by early_ioremap(),
* before ioremap() is functional.
*
* We round it up to the next 256 pages boundary so that we
* can have a single pgd entry and a single pte table:
*/
#define NR_FIX_BTMAPS 64
#define FIX_BTMAPS_SLOTS 4
FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
(__end_of_permanent_fixed_addresses & 255),
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
__end_of_fixed_addresses
};
#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
#endif /* _ASM_X86_FIXMAP_64_H */


@@ -1,5 +1 @@
#ifdef CONFIG_X86_32
# include "genapic_32.h"
#else
# include "genapic_64.h"
#endif
#include <asm/apic.h>


@@ -1,148 +0,0 @@
#ifndef _ASM_X86_GENAPIC_32_H
#define _ASM_X86_GENAPIC_32_H
#include <asm/mpspec.h>
#include <asm/atomic.h>
/*
* Generic APIC driver interface.
*
* A straightforward mapping of the APIC-related parts of the
* x86 subarchitecture interface to a dynamic object.
*
* This is used by the "generic" x86 subarchitecture.
*
* Copyright 2003 Andi Kleen, SuSE Labs.
*/
struct mpc_bus;
struct mpc_table;
struct mpc_cpu;
struct genapic {
char *name;
int (*probe)(void);
int (*apic_id_registered)(void);
const struct cpumask *(*target_cpus)(void);
int int_delivery_mode;
int int_dest_mode;
int ESR_DISABLE;
int apic_destination_logical;
unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
unsigned long (*check_apicid_present)(int apicid);
int no_balance_irq;
int no_ioapic_check;
void (*init_apic_ldr)(void);
physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
void (*setup_apic_routing)(void);
int (*multi_timer_check)(int apic, int irq);
int (*apicid_to_node)(int logical_apicid);
int (*cpu_to_logical_apicid)(int cpu);
int (*cpu_present_to_apicid)(int mps_cpu);
physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
void (*setup_portio_remap)(void);
int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
void (*enable_apic_mode)(void);
u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
/* mpparse */
/* When one of the next two hooks returns 1 the genapic
is switched to this. Essentially they are additional probe
functions. */
int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
char *productid);
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
unsigned (*get_apic_id)(unsigned long x);
unsigned long apic_id_mask;
unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask);
void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
#ifdef CONFIG_SMP
/* ipi */
void (*send_IPI_mask)(const struct cpumask *mask, int vector);
void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
#endif
int (*wakeup_cpu)(int apicid, unsigned long start_eip);
int trampoline_phys_low;
int trampoline_phys_high;
void (*wait_for_init_deassert)(atomic_t *deassert);
void (*smp_callin_clear_local_apic)(void);
void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
void (*restore_NMI_vector)(unsigned short *high, unsigned short *low);
void (*inquire_remote_apic)(int apicid);
};
#define APICFUNC(x) .x = x,
/* More functions could probably be marked IPIFUNC and save some space
in UP GENERICARCH kernels, but I don't have the nerve right now
to untangle this mess. -AK */
#ifdef CONFIG_SMP
#define IPIFUNC(x) APICFUNC(x)
#else
#define IPIFUNC(x)
#endif
#define APIC_INIT(aname, aprobe) \
{ \
.name = aname, \
.probe = aprobe, \
.int_delivery_mode = INT_DELIVERY_MODE, \
.int_dest_mode = INT_DEST_MODE, \
.no_balance_irq = NO_BALANCE_IRQ, \
.ESR_DISABLE = esr_disable, \
.apic_destination_logical = APIC_DEST_LOGICAL, \
APICFUNC(apic_id_registered) \
APICFUNC(target_cpus) \
APICFUNC(check_apicid_used) \
APICFUNC(check_apicid_present) \
APICFUNC(init_apic_ldr) \
APICFUNC(ioapic_phys_id_map) \
APICFUNC(setup_apic_routing) \
APICFUNC(multi_timer_check) \
APICFUNC(apicid_to_node) \
APICFUNC(cpu_to_logical_apicid) \
APICFUNC(cpu_present_to_apicid) \
APICFUNC(apicid_to_cpu_present) \
APICFUNC(setup_portio_remap) \
APICFUNC(check_phys_apicid_present) \
APICFUNC(mps_oem_check) \
APICFUNC(get_apic_id) \
.apic_id_mask = APIC_ID_MASK, \
APICFUNC(cpu_mask_to_apicid) \
APICFUNC(cpu_mask_to_apicid_and) \
APICFUNC(vector_allocation_domain) \
APICFUNC(acpi_madt_oem_check) \
IPIFUNC(send_IPI_mask) \
IPIFUNC(send_IPI_allbutself) \
IPIFUNC(send_IPI_all) \
APICFUNC(enable_apic_mode) \
APICFUNC(phys_pkg_id) \
.trampoline_phys_low = TRAMPOLINE_PHYS_LOW, \
.trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, \
APICFUNC(wait_for_init_deassert) \
APICFUNC(smp_callin_clear_local_apic) \
APICFUNC(store_NMI_vector) \
APICFUNC(restore_NMI_vector) \
APICFUNC(inquire_remote_apic) \
}
extern struct genapic *genapic;
extern void es7000_update_genapic_to_cluster(void);
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
#define get_uv_system_type() UV_NONE
#define is_uv_system() 0
#define uv_wakeup_secondary(a, b) 1
#define uv_system_init() do {} while (0)
#endif /* _ASM_X86_GENAPIC_32_H */


@@ -1,66 +0,0 @@
#ifndef _ASM_X86_GENAPIC_64_H
#define _ASM_X86_GENAPIC_64_H
#include <linux/cpumask.h>
/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
*
* Generic APIC sub-arch data struct.
*
* Hacked for x86-64 by James Cleverdon from i386 architecture code by
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
struct genapic {
char *name;
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
u32 int_delivery_mode;
u32 int_dest_mode;
int (*apic_id_registered)(void);
const struct cpumask *(*target_cpus)(void);
void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
void (*init_apic_ldr)(void);
/* ipi */
void (*send_IPI_mask)(const struct cpumask *mask, int vector);
void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
void (*send_IPI_self)(int vector);
/* */
unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask);
unsigned int (*phys_pkg_id)(int index_msb);
unsigned int (*get_apic_id)(unsigned long x);
unsigned long (*set_apic_id)(unsigned int id);
unsigned long apic_id_mask;
/* wakeup_secondary_cpu */
int (*wakeup_cpu)(int apicid, unsigned long start_eip);
};
extern struct genapic *genapic;
extern struct genapic apic_flat;
extern struct genapic apic_physflat;
extern struct genapic apic_x2apic_cluster;
extern struct genapic apic_x2apic_phys;
extern int acpi_madt_oem_check(char *, char *);
extern void apic_send_IPI_self(int vector);
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
extern enum uv_system_type get_uv_system_type(void);
extern int is_uv_system(void);
extern struct genapic apic_x2apic_uv_x;
DECLARE_PER_CPU(int, x2apic_extra_bits);
extern void uv_cpu_init(void);
extern void uv_system_init(void);
extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
extern void setup_apic_routing(void);
#endif /* _ASM_X86_GENAPIC_64_H */


@@ -1,11 +1,52 @@
#ifdef CONFIG_X86_32
# include "hardirq_32.h"
#else
# include "hardirq_64.h"
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H
#include <linux/threads.h>
#include <linux/irq.h>
typedef struct {
unsigned int __softirq_pending;
unsigned int __nmi_count; /* arch dependent */
unsigned int irq0_irqs;
#ifdef CONFIG_X86_LOCAL_APIC
unsigned int apic_timer_irqs; /* arch dependent */
unsigned int irq_spurious_count;
#endif
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
unsigned int irq_thermal_count;
# ifdef CONFIG_X86_64
unsigned int irq_threshold_count;
# endif
#endif
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS
#define __ARCH_IRQ_STAT
#define inc_irq_stat(member) percpu_add(irq_stat.member, 1)
#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
#define __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
extern void ack_bad_irq(unsigned int irq);
extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu arch_irq_stat_cpu
extern u64 arch_irq_stat(void);
#define arch_irq_stat arch_irq_stat
#endif /* _ASM_X86_HARDIRQ_H */
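A sketch of how the unified accessors are consumed, consistent with the reschedule IPI handler (the exact body may differ):

void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	inc_irq_stat(irq_resched_count);	/* one percpu add */
}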


@@ -1,30 +0,0 @@
#ifndef _ASM_X86_HARDIRQ_32_H
#define _ASM_X86_HARDIRQ_32_H
#include <linux/threads.h>
#include <linux/irq.h>
typedef struct {
unsigned int __softirq_pending;
unsigned long idle_timestamp;
unsigned int __nmi_count; /* arch dependent */
unsigned int apic_timer_irqs; /* arch dependent */
unsigned int irq0_irqs;
unsigned int irq_resched_count;
unsigned int irq_call_count;
unsigned int irq_tlb_count;
unsigned int irq_thermal_count;
unsigned int irq_spurious_count;
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
#define inc_irq_stat(member) (__get_cpu_var(irq_stat).member++)
void ack_bad_irq(unsigned int irq);
#include <linux/irq_cpustat.h>
#endif /* _ASM_X86_HARDIRQ_32_H */


@@ -1,25 +0,0 @@
#ifndef _ASM_X86_HARDIRQ_64_H
#define _ASM_X86_HARDIRQ_64_H
#include <linux/threads.h>
#include <linux/irq.h>
#include <asm/pda.h>
#include <asm/apic.h>
/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS
#define __ARCH_IRQ_STAT 1
#define inc_irq_stat(member) add_pda(member, 1)
#define local_softirq_pending() read_pda(__softirq_pending)
#define __ARCH_SET_SOFTIRQ_PENDING 1
#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
extern void ack_bad_irq(unsigned int irq);
#endif /* _ASM_X86_HARDIRQ_64_H */

View File

@@ -25,8 +25,6 @@
#include <asm/irq.h>
#include <asm/sections.h>
#define platform_legacy_irq(irq) ((irq) < 16)
/* Interrupt handlers registered during init_IRQ */
extern void apic_timer_interrupt(void);
extern void error_interrupt(void);
@@ -58,7 +56,7 @@ extern void make_8259A_irq(unsigned int irq);
extern void init_8259A(int aeoi);
/* IOAPIC */
#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
extern unsigned long io_apic_irqs;
extern void init_VISWS_APIC_irqs(void);
@@ -67,15 +65,7 @@ extern void disable_IO_APIC(void);
extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
extern void setup_ioapic_dest(void);
#ifdef CONFIG_X86_64
extern void enable_IO_APIC(void);
#endif
/* IPI functions */
#ifdef CONFIG_X86_32
extern void send_IPI_self(int vector);
#endif
extern void send_IPI(int dest, int vector);
/* Statistics */
extern atomic_t irq_err_count;
@@ -84,21 +74,11 @@ extern atomic_t irq_mis_count;
/* EISA */
extern void eisa_set_level_irq(unsigned int irq);
/* Voyager functions */
extern asmlinkage void vic_cpi_interrupt(void);
extern asmlinkage void vic_sys_interrupt(void);
extern asmlinkage void vic_cmn_interrupt(void);
extern asmlinkage void qic_timer_interrupt(void);
extern asmlinkage void qic_invalidate_interrupt(void);
extern asmlinkage void qic_reschedule_interrupt(void);
extern asmlinkage void qic_enable_irq_interrupt(void);
extern asmlinkage void qic_call_function_interrupt(void);
/* SMP */
extern void smp_apic_timer_interrupt(struct pt_regs *);
extern void smp_spurious_interrupt(struct pt_regs *);
extern void smp_error_interrupt(struct pt_regs *);
#ifdef CONFIG_X86_SMP
#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(struct pt_regs *);
extern void smp_call_function_interrupt(struct pt_regs *);
extern void smp_call_function_single_interrupt(struct pt_regs *);

View File

@@ -60,4 +60,8 @@ extern struct irq_chip i8259A_chip;
extern void mask_8259A(void);
extern void unmask_8259A(void);
#ifdef CONFIG_X86_32
extern void init_ISA_irqs(void);
#endif
#endif /* _ASM_X86_I8259_H */

View File

@@ -129,13 +129,6 @@ typedef struct compat_siginfo {
} _sifields;
} compat_siginfo_t;
struct ustat32 {
__u32 f_tfree;
compat_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
};
#define IA32_STACK_TOP IA32_PAGE_OFFSET
#ifdef __KERNEL__

View File

@@ -5,6 +5,7 @@
#include <linux/compiler.h>
#include <asm-generic/int-ll64.h>
#include <asm/page.h>
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
@@ -80,6 +81,98 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
#define readq readq
#define writeq writeq
/**
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
*
* The returned physical address is the physical (CPU) mapping for
* the memory address given. It is only valid to use this function on
* addresses directly mapped or allocated via kmalloc.
*
* This function does not give bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline phys_addr_t virt_to_phys(volatile void *address)
{
return __pa(address);
}
/**
* phys_to_virt - map physical address to virtual
* @address: address to remap
*
* The returned virtual address is a current CPU mapping for
* the memory address given. It is only valid to use this function on
* addresses that have a kernel mapping
*
* This function does not handle bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline void *phys_to_virt(phys_addr_t address)
{
return __va(address);
}
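/*
 * Round-trip sketch (illustrative, not part of the original header; the
 * example_* name is made up): for a directly mapped buffer, e.g. one
 * from kmalloc(), the two helpers above are exact inverses, since both
 * merely apply the kernel direct-map offset:
 */
static inline int example_pa_roundtrip(void *kmalloced_buf)
{
	phys_addr_t pa = virt_to_phys(kmalloced_buf);

	return phys_to_virt(pa) == kmalloced_buf;	/* true for direct-map addresses */
}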
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
* However, we truncate the address to unsigned int to avoid undesirable
* promotions in legacy drivers.
*/
static inline unsigned int isa_virt_to_bus(volatile void *address)
{
return (unsigned int)virt_to_phys(address);
}
#define isa_page_to_bus(page) ((unsigned int)page_to_phys(page))
#define isa_bus_to_virt phys_to_virt
/*
* However PCI ones are not necessarily 1:1 and therefore these interfaces
* are forbidden in portable PCI drivers.
*
* Allow them on x86 for legacy drivers, though.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/**
* ioremap - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
* ioremap performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*
* If the area you are trying to map is a PCI BAR you should have a
* look at pci_iomap().
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
unsigned long prot_val);
/*
* The default ioremap() behavior is non-cached:
*/
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
extern void iounmap(volatile void __iomem *addr);
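/*
 * Typical usage sketch (illustrative; the example_* name and the 0x10
 * register offset are made up): map a device's MMIO region uncached,
 * read one register through the mmio helpers, and tear the mapping
 * down again.
 */
static inline u32 example_read_mmio_reg(resource_size_t bar_phys)
{
	void __iomem *base = ioremap(bar_phys, PAGE_SIZE);
	u32 val;

	if (!base)
		return 0;
	val = readl(base + 0x10);	/* hypothetical register offset */
	iounmap(base);
	return val;
}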
#ifdef CONFIG_X86_32
# include "io_32.h"
#else
@@ -91,7 +184,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
unsigned long prot_val);
extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
/*
* early_ioremap() and early_iounmap() are for temporary early boot-time
@@ -103,7 +196,7 @@ extern void early_ioremap_reset(void);
extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
#define IO_SPACE_LIMIT 0xffff
#endif /* _ASM_X86_IO_H */

View File

@@ -37,8 +37,6 @@
* - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*/
#define IO_SPACE_LIMIT 0xffff
#define XQUAD_PORTIO_BASE 0xfe400000
#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
@@ -53,92 +51,6 @@
*/
#define xlate_dev_kmem_ptr(p) p
/**
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
*
* The returned physical address is the physical (CPU) mapping for
* the memory address given. It is only valid to use this function on
* addresses directly mapped or allocated via kmalloc.
*
* This function does not give bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa(address);
}
/**
* phys_to_virt - map physical address to virtual
* @address: address to remap
*
* The returned virtual address is a current CPU mapping for
* the memory address given. It is only valid to use this function on
* addresses that have a kernel mapping
*
* This function does not handle bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
/**
* ioremap - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
* ioremap performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*
* If the area you are trying to map is a PCI BAR you should have a
* look at pci_iomap().
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
unsigned long prot_val);
/*
* The default ioremap() behavior is non-cached:
*/
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
extern void iounmap(volatile void __iomem *addr);
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt
/*
* However PCI ones are not necessarily 1:1 and therefore these interfaces
* are forbidden in portable PCI drivers.
*
* Allow them on x86 for legacy drivers, though.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
static inline void
memset_io(volatile void __iomem *addr, unsigned char val, int count)
{

View File

@@ -136,73 +136,12 @@ __OUTS(b)
__OUTS(w)
__OUTS(l)
#define IO_SPACE_LIMIT 0xffff
#if defined(__KERNEL__) && defined(__x86_64__)
#include <linux/vmalloc.h>
#ifndef __i386__
/*
* Change virtual addresses to physical addresses and vice versa.
* These are pretty trivial.
*/
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa(address);
}
static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
#endif
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#include <asm-generic/iomap.h>
/*
* This one maps high address device memory and turns off caching for that area.
* it's useful if some control registers are in such an area and write combining
* or read caching is not desirable:
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
unsigned long prot_val);
/*
* The default ioremap() behavior is non-cached:
*/
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
extern void iounmap(volatile void __iomem *addr);
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt
/*
* However PCI ones are not necessarily 1:1 and therefore these interfaces
* are forbidden in portable PCI drivers.
*
* Allow them on x86 for legacy drivers, though.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
void __memcpy_fromio(void *, unsigned long, unsigned);
void __memcpy_toio(unsigned long, const void *, unsigned);

View File

@@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
extern int nr_ioapics;
extern int nr_ioapic_registers[MAX_IO_APICS];
/*
* MP-BIOS irq configuration table structures:
*/
#define MP_MAX_IOAPIC_PIN 127
struct mp_config_ioapic {
unsigned long mp_apicaddr;
unsigned int mp_apicid;
unsigned char mp_type;
unsigned char mp_apicver;
unsigned char mp_flags;
};
struct mp_config_intsrc {
unsigned int mp_dstapic;
unsigned char mp_type;
unsigned char mp_irqtype;
unsigned short mp_irqflag;
unsigned char mp_srcbus;
unsigned char mp_srcbusirq;
unsigned char mp_dstirq;
};
/* I/O APIC entries */
extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
/* # of MP IRQ source entries */
extern int mp_irq_entries;
/* MP IRQ source entries */
extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
/* non-0 if default (table-less) MP configuration */
extern int mpc_default_type;
@@ -165,15 +143,6 @@ extern int noioapicreroute;
/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
extern int timer_through_8259;
static inline void disable_ioapic_setup(void)
{
#ifdef CONFIG_PCI
noioapicquirk = 1;
noioapicreroute = -1;
#endif
skip_ioapic_setup = 1;
}
/*
* If we use the IO-APIC for IRQ routing, disable automatic
* assignment of PCI IRQ's.
@@ -200,6 +169,12 @@ extern void reinit_intr_remapped_IO_APIC(int);
extern void probe_nr_irqs_gsi(void);
extern int setup_ioapic_entry(int apic, int irq,
struct IO_APIC_route_entry *entry,
unsigned int destination, int trigger,
int polarity, int vector);
extern void ioapic_write_entry(int apic, int pin,
struct IO_APIC_route_entry e);
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
static const int timer_through_8259 = 0;

View File

@@ -1,6 +1,8 @@
#ifndef _ASM_X86_IPI_H
#define _ASM_X86_IPI_H
#ifdef CONFIG_X86_LOCAL_APIC
/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
@@ -55,8 +57,8 @@ static inline void __xapic_wait_icr_idle(void)
cpu_relax();
}
static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
unsigned int dest)
static inline void
__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
{
/*
* Subtle. In the case of the 'never do double writes' workaround
@@ -87,8 +89,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
* This is used to send an IPI with no shorthand notation (the destination is
* specified in bits 56 to 63 of the ICR).
*/
static inline void __send_IPI_dest_field(unsigned int mask, int vector,
unsigned int dest)
static inline void
__default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
unsigned long cfg;
@@ -117,41 +119,44 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
native_apic_mem_write(APIC_ICR, cfg);
}
static inline void send_IPI_mask_sequence(const struct cpumask *mask,
int vector)
{
unsigned long flags;
unsigned long query_cpu;
extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
int vector);
/*
* Hack. The clustered APIC addressing mode doesn't allow us to send
* to an arbitrary mask, so I do a unicast to each CPU instead.
* - mbligh
*/
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
vector, APIC_DEST_PHYSICAL);
}
local_irq_restore(flags);
/* Avoid include hell */
#define NMI_VECTOR 0x02
extern int no_broadcast;
static inline void __default_local_send_IPI_allbutself(int vector)
{
if (no_broadcast || vector == NMI_VECTOR)
apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
else
__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
}
static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
int vector)
static inline void __default_local_send_IPI_all(int vector)
{
unsigned long flags;
unsigned int query_cpu;
unsigned int this_cpu = smp_processor_id();
/* See Hack comment above */
local_irq_save(flags);
for_each_cpu(query_cpu, mask)
if (query_cpu != this_cpu)
__send_IPI_dest_field(
per_cpu(x86_cpu_to_apicid, query_cpu),
vector, APIC_DEST_PHYSICAL);
local_irq_restore(flags);
if (no_broadcast || vector == NMI_VECTOR)
apic->send_IPI_mask(cpu_online_mask, vector);
else
__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
}
#ifdef CONFIG_X86_32
extern void default_send_IPI_mask_logical(const struct cpumask *mask,
int vector);
extern void default_send_IPI_allbutself(int vector);
extern void default_send_IPI_all(int vector);
extern void default_send_IPI_self(int vector);
#endif
#endif
#endif /* _ASM_X86_IPI_H */

View File

@@ -36,9 +36,11 @@ static inline int irq_canonicalize(int irq)
extern void fixup_irqs(void);
#endif
extern unsigned int do_IRQ(struct pt_regs *regs);
extern void init_IRQ(void);
extern void native_init_IRQ(void);
extern bool handle_irq(unsigned irq, struct pt_regs *regs);
extern unsigned int do_IRQ(struct pt_regs *regs);
/* Interrupt vector management */
extern DECLARE_BITMAP(used_vectors, NR_VECTORS);

View File

@@ -1,5 +1,31 @@
#ifdef CONFIG_X86_32
# include "irq_regs_32.h"
#else
# include "irq_regs_64.h"
#endif
/*
* Per-cpu current frame pointer - the location of the last exception frame on
* the stack, stored in the per-cpu area.
*
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
#ifndef _ASM_X86_IRQ_REGS_H
#define _ASM_X86_IRQ_REGS_H
#include <asm/percpu.h>
#define ARCH_HAS_OWN_IRQ_REGS
DECLARE_PER_CPU(struct pt_regs *, irq_regs);
static inline struct pt_regs *get_irq_regs(void)
{
return percpu_read(irq_regs);
}
static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
struct pt_regs *old_regs;
old_regs = get_irq_regs();
percpu_write(irq_regs, new_regs);
return old_regs;
}
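/*
 * Usage sketch (illustrative, not part of the original header):
 * interrupt entry code typically saves the previous frame pointer,
 * installs the new one, and restores it on the way out, so nested
 * handlers still see the correct exception frame:
 */
static inline void example_irq_entry(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* ... dispatch the interrupt here ... */

	set_irq_regs(old_regs);
}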
#endif /* _ASM_X86_IRQ_REGS_H */

View File

@@ -1,31 +0,0 @@
/*
* Per-cpu current frame pointer - the location of the last exception frame on
* the stack, stored in the per-cpu area.
*
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
#ifndef _ASM_X86_IRQ_REGS_32_H
#define _ASM_X86_IRQ_REGS_32_H
#include <asm/percpu.h>
#define ARCH_HAS_OWN_IRQ_REGS
DECLARE_PER_CPU(struct pt_regs *, irq_regs);
static inline struct pt_regs *get_irq_regs(void)
{
return x86_read_percpu(irq_regs);
}
static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
struct pt_regs *old_regs;
old_regs = get_irq_regs();
x86_write_percpu(irq_regs, new_regs);
return old_regs;
}
#endif /* _ASM_X86_IRQ_REGS_32_H */

View File

@@ -1 +0,0 @@
#include <asm-generic/irq_regs.h>

View File

@@ -1,47 +1,69 @@
#ifndef _ASM_X86_IRQ_VECTORS_H
#define _ASM_X86_IRQ_VECTORS_H
#include <linux/threads.h>
/*
* Linux IRQ vector layout.
*
* There are 256 IDT entries (per CPU - each entry is 8 bytes) which can
* be defined by Linux. They are used as a jump table by the CPU when a
* given vector is triggered - by a CPU-external, CPU-internal or
* software-triggered event.
*
* Linux sets the kernel code address each entry jumps to early during
* bootup, and never changes them. This is the general layout of the
* IDT entries:
*
* Vectors 0 ... 31 : system traps and exceptions - hardcoded events
* Vectors 32 ... 127 : device interrupts
* Vector 128 : legacy int80 syscall interface
* Vectors 129 ... 237 : device interrupts
* Vectors 238 ... 255 : special interrupts
*
* 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
*
* This file enumerates the exact layout of them:
*/
#define NMI_VECTOR 0x02
#define NMI_VECTOR 0x02
/*
* IDT vectors usable for external interrupt sources start
* at 0x20:
*/
#define FIRST_EXTERNAL_VECTOR 0x20
#define FIRST_EXTERNAL_VECTOR 0x20
#ifdef CONFIG_X86_32
# define SYSCALL_VECTOR 0x80
# define SYSCALL_VECTOR 0x80
#else
# define IA32_SYSCALL_VECTOR 0x80
# define IA32_SYSCALL_VECTOR 0x80
#endif
/*
* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
* cleanup after irq migration.
*/
#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
/*
* Vectors 0x30-0x3f are used for ISA interrupts.
*/
#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
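/*
 * Sketch (illustrative): the sixteen ISA vectors above are consecutive,
 * so a legacy IRQ number maps to its IDT vector by a fixed offset:
 *
 *	vector = IRQ0_VECTOR + irq;	(valid only for irq 0..15)
 */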
/*
* Special IRQ vectors used by the SMP architecture, 0xf0-0xff
@@ -49,119 +71,98 @@
* some of the following vectors are 'rare', they are merged
* into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
* TLB, reschedule and local APIC vectors are performance-critical.
*
* Vectors 0xf0-0xfa are free (reserved for future Linux use).
*/
#ifdef CONFIG_X86_32
# define SPURIOUS_APIC_VECTOR 0xff
# define ERROR_APIC_VECTOR 0xfe
# define INVALIDATE_TLB_VECTOR 0xfd
# define RESCHEDULE_VECTOR 0xfc
# define CALL_FUNCTION_VECTOR 0xfb
# define CALL_FUNCTION_SINGLE_VECTOR 0xfa
# define THERMAL_APIC_VECTOR 0xf0
#else
#define SPURIOUS_APIC_VECTOR 0xff
/*
* Sanity check
*/
#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
# error SPURIOUS_APIC_VECTOR definition error
#endif
#define ERROR_APIC_VECTOR 0xfe
#define RESCHEDULE_VECTOR 0xfd
#define CALL_FUNCTION_VECTOR 0xfc
#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
#define THERMAL_APIC_VECTOR 0xfa
#define THRESHOLD_APIC_VECTOR 0xf9
#define UV_BAU_MESSAGE 0xf8
#define INVALIDATE_TLB_VECTOR_END 0xf7
#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
#define NUM_INVALIDATE_TLB_VECTORS 8
#ifdef CONFIG_X86_32
/* 0xf8 - 0xf9 : free */
#else
# define THRESHOLD_APIC_VECTOR 0xf9
# define UV_BAU_MESSAGE 0xf8
#endif
/* f0-f7 used for spreading out TLB flushes: */
#define INVALIDATE_TLB_VECTOR_END 0xf7
#define INVALIDATE_TLB_VECTOR_START 0xf0
#define NUM_INVALIDATE_TLB_VECTORS 8
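/*
 * Sketch (assumption: the usual spreading scheme, which this header does
 * not spell out): a sender picks its flush vector by folding the CPU
 * number into the eight-vector window, e.g.:
 *
 *	vector = INVALIDATE_TLB_VECTOR_START +
 *		 (smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS);
 */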
/*
* Local APIC timer IRQ vector is on a different priority level,
* to work around the 'lost local interrupt if more than 2 IRQ
* sources per level' errata.
*/
#define LOCAL_TIMER_VECTOR 0xef
#define LOCAL_TIMER_VECTOR 0xef
/*
* Performance monitoring interrupt vector:
*/
#define LOCAL_PERF_VECTOR 0xee
/*
* First APIC vector available to drivers: (vectors 0x30-0xee) we
* start at 0x31(0x41) to spread out vectors evenly between priority
* levels. (0x80 is the syscall vector)
*/
#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
#define NR_VECTORS 256
#define NR_VECTORS 256
#define FPU_IRQ 13
#define FPU_IRQ 13
#define FIRST_VM86_IRQ 3
#define LAST_VM86_IRQ 15
#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
#define FIRST_VM86_IRQ 3
#define LAST_VM86_IRQ 15
#define NR_IRQS_LEGACY 16
#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
#ifndef CONFIG_SPARSE_IRQ
# if NR_CPUS < MAX_IO_APICS
# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
# else
# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
# endif
#else
# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
# define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
# else
# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
# endif
#ifndef __ASSEMBLY__
static inline int invalid_vm86_irq(int irq)
{
return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
}
#endif
#elif defined(CONFIG_X86_VOYAGER)
/*
* Size the maximum number of interrupts.
*
* If the irq_desc[] array has a sparse layout, we can size things
* generously - it scales up linearly with the maximum number of CPUs,
* and the maximum number of IO-APICs, whichever is higher.
*
* In other cases we size more conservatively, to not create too large
* static arrays.
*/
# define NR_IRQS 224
#define NR_IRQS_LEGACY 16
#else /* IO_APIC || VOYAGER */
# define NR_IRQS 16
#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS )
#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
#ifdef CONFIG_X86_IO_APIC
# ifdef CONFIG_SPARSE_IRQ
# define NR_IRQS \
(CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
(NR_VECTORS + CPU_VECTOR_LIMIT) : \
(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
# else
# if NR_CPUS < MAX_IO_APICS
# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
# else
# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
# endif
# endif
#else /* !CONFIG_X86_IO_APIC: */
# define NR_IRQS NR_IRQS_LEGACY
#endif
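/*
 * Worked example (hypothetical configuration): with NR_CPUS == 64,
 * MAX_IO_APICS == 128 and CONFIG_SPARSE_IRQ set, CPU_VECTOR_LIMIT is
 * 512 and IO_APIC_VECTOR_LIMIT is 4096, so the larger limit wins and
 * NR_IRQS = NR_VECTORS + IO_APIC_VECTOR_LIMIT = 256 + 4096 = 4352.
 */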
/* Voyager specific defines */
/* These define the CPIs we use in linux */
#define VIC_CPI_LEVEL0 0
#define VIC_CPI_LEVEL1 1
/* now the fake CPIs */
#define VIC_TIMER_CPI 2
#define VIC_INVALIDATE_CPI 3
#define VIC_RESCHEDULE_CPI 4
#define VIC_ENABLE_IRQ_CPI 5
#define VIC_CALL_FUNCTION_CPI 6
#define VIC_CALL_FUNCTION_SINGLE_CPI 7
/* Now the QIC CPIs: Since we don't need the two initial levels,
* these are 2 less than the VIC CPIs */
#define QIC_CPI_OFFSET 1
#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
#define VIC_START_FAKE_CPI VIC_TIMER_CPI
#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
/* this is the SYS_INT CPI. */
#define VIC_SYS_INT 8
#define VIC_CMN_INT 15
/* This is the boot CPI for alternate processors. It gets overwritten
* by the above once the system has activated all available processors */
#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
#endif /* _ASM_X86_IRQ_VECTORS_H */

View File

@@ -9,23 +9,8 @@
# define PAGES_NR 4
#else
# define PA_CONTROL_PAGE 0
# define VA_CONTROL_PAGE 1
# define PA_PGD 2
# define VA_PGD 3
# define PA_PUD_0 4
# define VA_PUD_0 5
# define PA_PMD_0 6
# define VA_PMD_0 7
# define PA_PTE_0 8
# define VA_PTE_0 9
# define PA_PUD_1 10
# define VA_PUD_1 11
# define PA_PMD_1 12
# define VA_PMD_1 13
# define PA_PTE_1 14
# define VA_PTE_1 15
# define PA_TABLE_PAGE 16
# define PAGES_NR 17
# define PA_TABLE_PAGE 1
# define PAGES_NR 2
#endif
#ifdef CONFIG_X86_32
@@ -157,9 +142,9 @@ relocate_kernel(unsigned long indirection_page,
unsigned long start_address) ATTRIB_NORET;
#endif
#ifdef CONFIG_X86_32
#define ARCH_HAS_KIMAGE_ARCH
#ifdef CONFIG_X86_32
struct kimage_arch {
pgd_t *pgd;
#ifdef CONFIG_X86_PAE
@@ -169,6 +154,12 @@ struct kimage_arch {
pte_t *pte0;
pte_t *pte1;
};
#else
struct kimage_arch {
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
};
#endif
#endif /* __ASSEMBLY__ */

View File

@@ -15,6 +15,7 @@
#define __KVM_HAVE_DEVICE_ASSIGNMENT
#define __KVM_HAVE_MSI
#define __KVM_HAVE_USER_NMI
#define __KVM_HAVE_GUEST_DEBUG
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
@@ -212,7 +213,30 @@ struct kvm_pit_channel_state {
__s64 count_load_time;
};
struct kvm_debug_exit_arch {
__u32 exception;
__u32 pad;
__u64 pc;
__u64 dr6;
__u64 dr7;
};
#define KVM_GUESTDBG_USE_SW_BP 0x00010000
#define KVM_GUESTDBG_USE_HW_BP 0x00020000
#define KVM_GUESTDBG_INJECT_DB 0x00040000
#define KVM_GUESTDBG_INJECT_BP 0x00080000
/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
__u64 debugreg[8];
};
struct kvm_pit_state {
struct kvm_pit_channel_state channels[3];
};
struct kvm_reinject_control {
__u8 pit_reinject;
__u8 reserved[31];
};
#endif /* _ASM_X86_KVM_H */

View File

@@ -22,6 +22,7 @@
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#define KVM_MAX_VCPUS 16
#define KVM_MEMORY_SLOTS 32
@@ -134,11 +135,18 @@ enum {
#define KVM_NR_MEM_OBJS 40
struct kvm_guest_debug {
int enabled;
unsigned long bp[4];
int singlestep;
};
#define KVM_NR_DB_REGS 4
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
#define DR6_FIXED_1 0xffff0ff0
#define DR6_VOLATILE 0x0000e00f
#define DR7_BP_EN_MASK 0x000000ff
#define DR7_GE (1 << 9)
#define DR7_GD (1 << 13)
#define DR7_FIXED_1 0x00000400
#define DR7_VOLATILE 0xffff23ff
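/*
 * Sketch (assumption: standard x86 debug-register encoding): a minimal
 * DR7 value that locally enables hardware breakpoint 0 keeps the
 * architecturally fixed-1 bit and sets the L0 enable bit:
 *
 *	unsigned long dr7 = DR7_FIXED_1 | 0x1;	(bit 0 = L0 enable)
 */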
/*
* We don't want allocation failures within the mmu code, so we preallocate
@@ -162,7 +170,8 @@ struct kvm_pte_chain {
* bits 0:3 - total guest paging levels (2-4, or zero for real mode)
* bits 4:7 - page table level for this shadow (1-4)
* bits 8:9 - page table quadrant for 2-level guests
* bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode)
* bit 16 - direct mapping of virtual to physical mapping at gfn
* used for real mode and two-dimensional paging
* bits 17:19 - common access permissions for all ptes in this shadow page
*/
union kvm_mmu_page_role {
@@ -172,9 +181,10 @@ union kvm_mmu_page_role {
unsigned level:4;
unsigned quadrant:2;
unsigned pad_for_nice_hex_output:6;
unsigned metaphysical:1;
unsigned direct:1;
unsigned access:3;
unsigned invalid:1;
unsigned cr4_pge:1;
};
};
@@ -218,6 +228,18 @@ struct kvm_pv_mmu_op_buffer {
char buf[512] __aligned(sizeof(long));
};
struct kvm_pio_request {
unsigned long count;
int cur_count;
gva_t guest_gva;
int in;
int port;
int size;
int string;
int down;
int rep;
};
/*
* x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
* 32-bit). The kvm_mmu structure abstracts the details of the current mmu
@@ -236,6 +258,7 @@ struct kvm_mmu {
hpa_t root_hpa;
int root_level;
int shadow_root_level;
union kvm_mmu_page_role base_role;
u64 *pae_root;
};
@@ -258,6 +281,7 @@ struct kvm_vcpu_arch {
unsigned long cr3;
unsigned long cr4;
unsigned long cr8;
u32 hflags;
u64 pdptrs[4]; /* pae */
u64 shadow_efer;
u64 apic_base;
@@ -338,6 +362,15 @@ struct kvm_vcpu_arch {
struct mtrr_state_type mtrr_state;
u32 pat;
int switch_db_regs;
unsigned long host_db[KVM_NR_DB_REGS];
unsigned long host_dr6;
unsigned long host_dr7;
unsigned long db[KVM_NR_DB_REGS];
unsigned long dr6;
unsigned long dr7;
unsigned long eff_db[KVM_NR_DB_REGS];
};
struct kvm_mem_alias {
@@ -378,6 +411,7 @@ struct kvm_arch{
unsigned long irq_sources_bitmap;
unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
u64 vm_init_tsc;
};
struct kvm_vm_stat {
@@ -446,8 +480,7 @@ struct kvm_x86_ops {
void (*vcpu_put)(struct kvm_vcpu *vcpu);
int (*set_guest_debug)(struct kvm_vcpu *vcpu,
struct kvm_debug_guest *dbg);
void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
struct kvm_guest_debug *dbg);
int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
@@ -583,16 +616,12 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
u32 error_code);
void kvm_pic_set_irq(void *opaque, int irq, int level);
int kvm_pic_set_irq(void *opaque, int irq, int level);
void kvm_inject_nmi(struct kvm_vcpu *vcpu);
void fx_init(struct kvm_vcpu *vcpu);
int emulator_read_std(unsigned long addr,
void *val,
unsigned int bytes,
struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
const void *val,
unsigned int bytes,
@@ -737,6 +766,10 @@ enum {
TASK_SWITCH_GATE = 3,
};
#define HF_GIF_MASK (1 << 0)
#define HF_HIF_MASK (1 << 1)
#define HF_VINTR_MASK (1 << 2)
/*
* Hardware virtualization extension instructions may fault if a
* reboot turns off virtualization while processes are running.

View File

@@ -52,70 +52,14 @@
#endif
#define GLOBAL(name) \
.globl name; \
name:
#ifdef CONFIG_X86_ALIGNMENT_16
#define __ALIGN .align 16,0x90
#define __ALIGN_STR ".align 16,0x90"
#endif
/*
* Check that ENTRY_X86/END_X86 and KPROBE_ENTRY_X86/KPROBE_END_X86
* do not appear unbalanced, missed or mixed.
*/
#define __set_entry_x86 .set ENTRY_X86_IN, 0
#define __unset_entry_x86 .set ENTRY_X86_IN, 1
#define __set_kprobe_x86 .set KPROBE_X86_IN, 0
#define __unset_kprobe_x86 .set KPROBE_X86_IN, 1
#define __macro_err_x86 .error "ENTRY_X86/KPROBE_X86 unbalanced,missed,mixed"
#define __check_entry_x86 \
.ifdef ENTRY_X86_IN; \
.ifeq ENTRY_X86_IN; \
__macro_err_x86; \
.abort; \
.endif; \
.endif
#define __check_kprobe_x86 \
.ifdef KPROBE_X86_IN; \
.ifeq KPROBE_X86_IN; \
__macro_err_x86; \
.abort; \
.endif; \
.endif
#define __check_entry_kprobe_x86 \
__check_entry_x86; \
__check_kprobe_x86
#define ENTRY_KPROBE_FINAL_X86 __check_entry_kprobe_x86
#define ENTRY_X86(name) \
__check_entry_kprobe_x86; \
__set_entry_x86; \
.globl name; \
__ALIGN; \
name:
#define END_X86(name) \
__unset_entry_x86; \
__check_entry_kprobe_x86; \
.size name, .-name
#define KPROBE_ENTRY_X86(name) \
__check_entry_kprobe_x86; \
__set_kprobe_x86; \
.pushsection .kprobes.text, "ax"; \
.globl name; \
__ALIGN; \
name:
#define KPROBE_END_X86(name) \
__unset_kprobe_x86; \
__check_entry_kprobe_x86; \
.size name, .-name; \
.popsection
#endif /* _ASM_X86_LINKAGE_H */

View File

@@ -1,168 +0,0 @@
#ifndef _ASM_X86_MACH_DEFAULT_MACH_APIC_H
#define _ASM_X86_MACH_DEFAULT_MACH_APIC_H
#ifdef CONFIG_X86_LOCAL_APIC
#include <mach_apicdef.h>
#include <asm/smp.h>
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
static inline const struct cpumask *target_cpus(void)
{
#ifdef CONFIG_SMP
return cpu_online_mask;
#else
return cpumask_of(0);
#endif
}
#define NO_BALANCE_IRQ (0)
#define esr_disable (0)
#ifdef CONFIG_X86_64
#include <asm/genapic.h>
#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
#define INT_DEST_MODE (genapic->int_dest_mode)
#define TARGET_CPUS (genapic->target_cpus())
#define apic_id_registered (genapic->apic_id_registered)
#define init_apic_ldr (genapic->init_apic_ldr)
#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
#define phys_pkg_id (genapic->phys_pkg_id)
#define vector_allocation_domain (genapic->vector_allocation_domain)
#define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
#define send_IPI_self (genapic->send_IPI_self)
#define wakeup_secondary_cpu (genapic->wakeup_cpu)
extern void setup_apic_routing(void);
#else
#define INT_DELIVERY_MODE dest_LowestPrio
#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
#define TARGET_CPUS (target_cpus())
#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init
/*
* Set up the logical destination ID.
*
* Intel recommends to set DFR, LDR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
static inline void init_apic_ldr(void)
{
unsigned long val;
apic_write(APIC_DFR, APIC_DFR_VALUE);
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
apic_write(APIC_LDR, val);
}
static inline int apic_id_registered(void)
{
return physid_isset(read_apic_id(), phys_cpu_present_map);
}
static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return cpumask_bits(cpumask)[0];
}
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{
unsigned long mask1 = cpumask_bits(cpumask)[0];
unsigned long mask2 = cpumask_bits(andmask)[0];
unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
return (unsigned int)(mask1 & mask2 & mask3);
}
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}
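/*
 * Worked example (illustrative): with index_msb == 4, APIC IDs
 * 0x10-0x1f all shift down to physical package id 1, i.e. the low four
 * bits select the core/thread within the package.
 */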
static inline void setup_apic_routing(void)
{
#ifdef CONFIG_X86_IO_APIC
printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
"Flat", nr_ioapics);
#endif
}
static inline int apicid_to_node(int logical_apicid)
{
#ifdef CONFIG_SMP
return apicid_2_node[hard_smp_processor_id()];
#else
return 0;
#endif
}
static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
{
/* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest
* priority interrupt delivery mode.
*
* In particular there was a hyperthreading cpu observed to
* deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt destination.
*/
*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
}
#endif
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
return physid_isset(apicid, bitmap);
}
static inline unsigned long check_apicid_present(int bit)
{
return physid_isset(bit, phys_cpu_present_map);
}
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
return phys_map;
}
static inline int multi_timer_check(int apic, int irq)
{
return 0;
}
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
return 1 << cpu;
}
static inline int cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
else
return BAD_APICID;
}
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
return physid_mask_of_physid(phys_apicid);
}
static inline void setup_portio_remap(void)
{
}
static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
}
static inline void enable_apic_mode(void)
{
}
#endif /* CONFIG_X86_LOCAL_APIC */
#endif /* _ASM_X86_MACH_DEFAULT_MACH_APIC_H */

View File

@@ -1,24 +0,0 @@
#ifndef _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
#define _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
#include <asm/apic.h>
#ifdef CONFIG_X86_64
#define APIC_ID_MASK (genapic->apic_id_mask)
#define GET_APIC_ID(x) (genapic->get_apic_id(x))
#define SET_APIC_ID(x) (genapic->set_apic_id(x))
#else
#define APIC_ID_MASK (0xF<<24)
static inline unsigned get_apic_id(unsigned long x)
{
unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
if (APIC_XAPIC(ver))
return (((x)>>24)&0xFF);
else
return (((x)>>24)&0xF);
}
#define GET_APIC_ID(x) get_apic_id(x)
#endif
#endif /* _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H */

View File

@@ -1,64 +0,0 @@
#ifndef _ASM_X86_MACH_DEFAULT_MACH_IPI_H
#define _ASM_X86_MACH_DEFAULT_MACH_IPI_H
/* Avoid include hell */
#define NMI_VECTOR 0x02
void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
void __send_IPI_shortcut(unsigned int shortcut, int vector);
extern int no_broadcast;
#ifdef CONFIG_X86_64
#include <asm/genapic.h>
#define send_IPI_mask (genapic->send_IPI_mask)
#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
#else
static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
send_IPI_mask_bitmask(mask, vector);
}
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
#endif
static inline void __local_send_IPI_allbutself(int vector)
{
if (no_broadcast || vector == NMI_VECTOR)
send_IPI_mask_allbutself(cpu_online_mask, vector);
else
__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}
static inline void __local_send_IPI_all(int vector)
{
if (no_broadcast || vector == NMI_VECTOR)
send_IPI_mask(cpu_online_mask, vector);
else
__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}
#ifdef CONFIG_X86_64
#define send_IPI_allbutself (genapic->send_IPI_allbutself)
#define send_IPI_all (genapic->send_IPI_all)
#else
static inline void send_IPI_allbutself(int vector)
{
/*
* if there are no other CPUs in the system then we get an APIC send
* error if we try to broadcast, thus avoid sending IPIs in this case.
*/
if (!(num_online_cpus() > 1))
return;
__local_send_IPI_allbutself(vector);
return;
}
static inline void send_IPI_all(int vector)
{
__local_send_IPI_all(vector);
}
#endif
#endif /* _ASM_X86_MACH_DEFAULT_MACH_IPI_H */

View File

@@ -1,17 +0,0 @@
#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
#define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
static inline int
mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
{
return 0;
}
/* Hook from generic ACPI tables.c */
static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
return 0;
}
#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H */

View File

@@ -1,12 +0,0 @@
#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
#define _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
#define MAX_IRQ_SOURCES 256
#if CONFIG_BASE_SMALL == 0
#define MAX_MP_BUSSES 256
#else
#define MAX_MP_BUSSES 32
#endif
#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H */

View File

@@ -1,41 +0,0 @@
#ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
#define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
#define TRAMPOLINE_PHYS_LOW (0x467)
#define TRAMPOLINE_PHYS_HIGH (0x469)
static inline void wait_for_init_deassert(atomic_t *deassert)
{
while (!atomic_read(deassert))
cpu_relax();
return;
}
/* Nothing to do for most platforms, since cleared by the INIT cycle */
static inline void smp_callin_clear_local_apic(void)
{
}
static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
{
}
static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
{
}
#ifdef CONFIG_SMP
extern void __inquire_remote_apic(int apicid);
#else /* CONFIG_SMP */
static inline void __inquire_remote_apic(int apicid)
{
}
#endif /* CONFIG_SMP */
static inline void inquire_remote_apic(int apicid)
{
if (apic_verbosity >= APIC_DEBUG)
__inquire_remote_apic(apicid);
}
#endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */

View File

@@ -1,15 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_GPIO_H
#define _ASM_X86_MACH_GENERIC_GPIO_H
int gpio_request(unsigned gpio, const char *label);
void gpio_free(unsigned gpio);
int gpio_direction_input(unsigned gpio);
int gpio_direction_output(unsigned gpio, int value);
int gpio_get_value(unsigned gpio);
void gpio_set_value(unsigned gpio, int value);
int gpio_to_irq(unsigned gpio);
int irq_to_gpio(unsigned irq);
#include <asm-generic/gpio.h> /* cansleep wrappers */
#endif /* _ASM_X86_MACH_GENERIC_GPIO_H */

View File

@@ -1,35 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_MACH_APIC_H
#define _ASM_X86_MACH_GENERIC_MACH_APIC_H
#include <asm/genapic.h>
#define esr_disable (genapic->ESR_DISABLE)
#define NO_BALANCE_IRQ (genapic->no_balance_irq)
#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
#define INT_DEST_MODE (genapic->int_dest_mode)
#undef APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
#define TARGET_CPUS (genapic->target_cpus())
#define apic_id_registered (genapic->apic_id_registered)
#define init_apic_ldr (genapic->init_apic_ldr)
#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
#define setup_apic_routing (genapic->setup_apic_routing)
#define multi_timer_check (genapic->multi_timer_check)
#define apicid_to_node (genapic->apicid_to_node)
#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid)
#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
#define setup_portio_remap (genapic->setup_portio_remap)
#define check_apicid_present (genapic->check_apicid_present)
#define check_phys_apicid_present (genapic->check_phys_apicid_present)
#define check_apicid_used (genapic->check_apicid_used)
#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
#define vector_allocation_domain (genapic->vector_allocation_domain)
#define enable_apic_mode (genapic->enable_apic_mode)
#define phys_pkg_id (genapic->phys_pkg_id)
#define wakeup_secondary_cpu (genapic->wakeup_cpu)
extern void generic_bigsmp_probe(void);
#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */

View File

@@ -1,11 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
#define _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
#ifndef APIC_DEFINITION
#include <asm/genapic.h>
#define GET_APIC_ID (genapic->get_apic_id)
#define APIC_ID_MASK (genapic->apic_id_mask)
#endif
#endif /* _ASM_X86_MACH_GENERIC_MACH_APICDEF_H */

View File

@@ -1,10 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_MACH_IPI_H
#define _ASM_X86_MACH_GENERIC_MACH_IPI_H
#include <asm/genapic.h>
#define send_IPI_mask (genapic->send_IPI_mask)
#define send_IPI_allbutself (genapic->send_IPI_allbutself)
#define send_IPI_all (genapic->send_IPI_all)
#endif /* _ASM_X86_MACH_GENERIC_MACH_IPI_H */

View File

@@ -1,9 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
#define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
extern int mps_oem_check(struct mpc_table *, char *, char *);
extern int acpi_madt_oem_check(char *, char *);
#endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */

View File

@@ -1,12 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
#define _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
#define MAX_IRQ_SOURCES 256
/* Summit or generic (i.e. installer) kernels need lots of bus entries. */
/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
#define MAX_MP_BUSSES 260
extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
#endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */

View File

@@ -1,12 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low)
#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high)
#define wait_for_init_deassert (genapic->wait_for_init_deassert)
#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic)
#define store_NMI_vector (genapic->store_NMI_vector)
#define restore_NMI_vector (genapic->restore_NMI_vector)
#define inquire_remote_apic (genapic->inquire_remote_apic)
#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */

View File

@@ -1,60 +0,0 @@
#ifndef _ASM_X86_MACH_RDC321X_GPIO_H
#define _ASM_X86_MACH_RDC321X_GPIO_H
#include <linux/kernel.h>
extern int rdc_gpio_get_value(unsigned gpio);
extern void rdc_gpio_set_value(unsigned gpio, int value);
extern int rdc_gpio_direction_input(unsigned gpio);
extern int rdc_gpio_direction_output(unsigned gpio, int value);
extern int rdc_gpio_request(unsigned gpio, const char *label);
extern void rdc_gpio_free(unsigned gpio);
extern void __init rdc321x_gpio_setup(void);
/* Wrappers for the arch-neutral GPIO API */
static inline int gpio_request(unsigned gpio, const char *label)
{
return rdc_gpio_request(gpio, label);
}
static inline void gpio_free(unsigned gpio)
{
might_sleep();
rdc_gpio_free(gpio);
}
static inline int gpio_direction_input(unsigned gpio)
{
return rdc_gpio_direction_input(gpio);
}
static inline int gpio_direction_output(unsigned gpio, int value)
{
return rdc_gpio_direction_output(gpio, value);
}
static inline int gpio_get_value(unsigned gpio)
{
return rdc_gpio_get_value(gpio);
}
static inline void gpio_set_value(unsigned gpio, int value)
{
rdc_gpio_set_value(gpio, value);
}
static inline int gpio_to_irq(unsigned gpio)
{
return gpio;
}
static inline int irq_to_gpio(unsigned irq)
{
return irq;
}
/* For cansleep */
#include <asm-generic/gpio.h>
#endif /* _ASM_X86_MACH_RDC321X_GPIO_H */

View File

@@ -1,17 +0,0 @@
/* defines for inline arch setup functions */
#include <linux/clockchips.h>
#include <asm/voyager.h>
#include <asm/i8253.h>
/**
* do_timer_interrupt_hook - hook into timer tick
*
* Call the pit clock event handler. see asm/i8253.h
**/
static inline void do_timer_interrupt_hook(void)
{
global_clock_event->event_handler(global_clock_event);
voyager_timer_interrupt();
}

View File

@@ -1,26 +0,0 @@
/* -*- mode: c; c-basic-offset: 8 -*- */
/* Copyright (C) 2002
*
* Author: James.Bottomley@HansenPartnership.com
*
* linux/arch/i386/voyager/entry_arch.h
*
* This file builds the VIC and QIC CPI gates
*/
/* initialise the voyager interrupt gates
*
* This uses the macros in irq.h to set up assembly jump gates. The
* calls are then redirected to the same routine with smp_ prefixed */
BUILD_INTERRUPT(vic_sys_interrupt, VIC_SYS_INT)
BUILD_INTERRUPT(vic_cmn_interrupt, VIC_CMN_INT)
BUILD_INTERRUPT(vic_cpi_interrupt, VIC_CPI_LEVEL0);
/* do all the QIC interrupts */
BUILD_INTERRUPT(qic_timer_interrupt, QIC_TIMER_CPI);
BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);

View File

@@ -1,12 +0,0 @@
#include <asm/voyager.h>
#include <asm/setup.h>
#define VOYAGER_BIOS_INFO ((struct voyager_bios_info *) \
(&boot_params.apm_bios_info))
/* Hook to call BIOS initialisation function */
/* for voyager, pass the voyager BIOS/SUS info area to the detection
* routines */
#define ARCH_SETUP voyager_detect(VOYAGER_BIOS_INFO);

View File

@@ -21,11 +21,54 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_X86_32
# include "mmu_context_32.h"
#else
# include "mmu_context_64.h"
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
percpu_write(cpu_tlbstate.active_mm, next);
#endif
cpu_set(cpu, next->cpu_vm_mask);
/* Re-load page tables */
load_cr3(next->pgd);
/*
* load the LDT, if the LDT is different:
*/
if (unlikely(prev->context.ldt != next->context.ldt))
load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
else {
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
/* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
load_cr3(next->pgd);
load_LDT_nolock(&next->context);
}
}
#endif
}
#define activate_mm(prev, next) \
do { \
@@ -33,5 +76,17 @@ do { \
switch_mm((prev), (next), NULL); \
} while (0);
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm) \
do { \
lazy_load_gs(0); \
} while (0)
#else
#define deactivate_mm(tsk, mm) \
do { \
load_gs_index(0); \
loadsegment(fs, 0); \
} while (0)
#endif
#endif /* _ASM_X86_MMU_CONTEXT_H */

View File

@@ -1,55 +0,0 @@
#ifndef _ASM_X86_MMU_CONTEXT_32_H
#define _ASM_X86_MMU_CONTEXT_32_H
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
{
int cpu = smp_processor_id();
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
x86_write_percpu(cpu_tlbstate.active_mm, next);
#endif
cpu_set(cpu, next->cpu_vm_mask);
/* Re-load page tables */
load_cr3(next->pgd);
/*
* load the LDT, if the LDT is different:
*/
if (unlikely(prev->context.ldt != next->context.ldt))
load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
else {
x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
/* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload %cr3.
*/
load_cr3(next->pgd);
load_LDT_nolock(&next->context);
}
}
#endif
}
#define deactivate_mm(tsk, mm) \
asm("movl %0,%%gs": :"r" (0));
#endif /* _ASM_X86_MMU_CONTEXT_32_H */

View File

@@ -1,54 +0,0 @@
#ifndef _ASM_X86_MMU_CONTEXT_64_H
#define _ASM_X86_MMU_CONTEXT_64_H
#include <asm/pda.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
if (read_pda(mmu_state) == TLBSTATE_OK)
write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
write_pda(mmu_state, TLBSTATE_OK);
write_pda(active_mm, next);
#endif
cpu_set(cpu, next->cpu_vm_mask);
load_cr3(next->pgd);
if (unlikely(next->context.ldt != prev->context.ldt))
load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
else {
write_pda(mmu_state, TLBSTATE_OK);
if (read_pda(active_mm) != next)
BUG();
if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
/* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
load_cr3(next->pgd);
load_LDT_nolock(&next->context);
}
}
#endif
}
#define deactivate_mm(tsk, mm) \
do { \
load_gs_index(0); \
asm volatile("movl %0,%%fs"::"r"(0)); \
} while (0)
#endif /* _ASM_X86_MMU_CONTEXT_64_H */

View File

@@ -91,46 +91,9 @@ static inline int pfn_valid(int pfn)
#endif /* CONFIG_DISCONTIGMEM */
#ifdef CONFIG_NEED_MULTIPLE_NODES
/*
* Following are macros that are specific to this numa platform.
*/
#define reserve_bootmem(addr, size, flags) \
reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
#define alloc_bootmem(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_nopanic(x) \
__alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
__pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_nopanic(x) \
__alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \
__pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
#define alloc_bootmem_node(pgdat, x) \
({ \
struct pglist_data __maybe_unused \
*__alloc_bootmem_node__pgdat = (pgdat); \
__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
__pa(MAX_DMA_ADDRESS)); \
})
#define alloc_bootmem_pages_node(pgdat, x) \
({ \
struct pglist_data __maybe_unused \
*__alloc_bootmem_node__pgdat = (pgdat); \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
__pa(MAX_DMA_ADDRESS)); \
})
#define alloc_bootmem_low_pages_node(pgdat, x) \
({ \
struct pglist_data __maybe_unused \
*__alloc_bootmem_node__pgdat = (pgdat); \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \
})
/* always use node 0 for bootmem on this numa platform */
#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit) \
(NODE_DATA(0)->bdata)
#endif /* CONFIG_NEED_MULTIPLE_NODES */
#endif /* _ASM_X86_MMZONE_32_H */

View File

@@ -9,7 +9,18 @@ extern int apic_version[MAX_APICS];
extern int pic_mode;
#ifdef CONFIG_X86_32
#include <mach_mpspec.h>
/*
* Summit or generic (i.e. installer) kernels need lots of bus entries.
* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets.
*/
#if CONFIG_BASE_SMALL == 0
# define MAX_MP_BUSSES 260
#else
# define MAX_MP_BUSSES 32
#endif
#define MAX_IRQ_SOURCES 256
extern unsigned int def_to_bigsmp;
extern u8 apicid_2_node[];
@@ -20,15 +31,15 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
#endif
#define MAX_APICID 256
#define MAX_APICID 256
#else
#else /* CONFIG_X86_64: */
#define MAX_MP_BUSSES 256
#define MAX_MP_BUSSES 256
/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
#endif
#endif /* CONFIG_X86_64 */
extern void early_find_smp_config(void);
extern void early_get_smp_config(void);
@@ -45,11 +56,13 @@ extern int smp_found_config;
extern int mpc_default_type;
extern unsigned long mp_lapic_addr;
extern void find_smp_config(void);
extern void get_smp_config(void);
#ifdef CONFIG_X86_MPPARSE
extern void find_smp_config(void);
extern void early_reserve_e820_mpc_new(void);
#else
static inline void find_smp_config(void) { }
static inline void early_reserve_e820_mpc_new(void) { }
#endif
@@ -64,6 +77,8 @@ extern int acpi_probe_gsi(void);
#ifdef CONFIG_X86_IO_APIC
extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
u32 gsi, int triggering, int polarity);
extern int mp_find_ioapic(int gsi);
extern int mp_find_ioapic_pin(int ioapic, int gsi);
#else
static inline int
mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
@@ -148,4 +163,8 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
extern physid_mask_t phys_cpu_present_map;
extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
extern int default_acpi_madt_oem_check(char *, char *);
#endif /* _ASM_X86_MPSPEC_H */


@@ -24,17 +24,18 @@
# endif
#endif
struct intel_mp_floating {
char mpf_signature[4]; /* "_MP_" */
unsigned int mpf_physptr; /* Configuration table address */
unsigned char mpf_length; /* Our length (paragraphs) */
unsigned char mpf_specification;/* Specification version */
unsigned char mpf_checksum; /* Checksum (makes sum 0) */
unsigned char mpf_feature1; /* Standard or configuration ? */
unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */
unsigned char mpf_feature3; /* Unused (0) */
unsigned char mpf_feature4; /* Unused (0) */
unsigned char mpf_feature5; /* Unused (0) */
/* Intel MP Floating Pointer Structure */
struct mpf_intel {
char signature[4]; /* "_MP_" */
unsigned int physptr; /* Configuration table address */
unsigned char length; /* Our length (paragraphs) */
unsigned char specification; /* Specification version */
unsigned char checksum; /* Checksum (makes sum 0) */
unsigned char feature1; /* Standard or configuration ? */
unsigned char feature2; /* Bit7 set for IMCR|PIC */
unsigned char feature3; /* Unused (0) */
unsigned char feature4; /* Unused (0) */
unsigned char feature5; /* Unused (0) */
};
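/*
 * Editorial sketch, not part of this patch: per the Intel MP spec, a
 * candidate mpf_intel is valid when its signature is "_MP_" and all of
 * its bytes sum to zero modulo 256 (assumes <linux/string.h> for
 * memcmp; the helper name is hypothetical).
 */
static int mpf_checksum_ok(const struct mpf_intel *mpf)
{
	const u8 *p = (const u8 *)mpf;
	unsigned int len = mpf->length * 16;	/* length is in 16-byte paragraphs */
	u8 sum = 0;
	unsigned int i;

	if (memcmp(mpf->signature, "_MP_", 4))
		return 0;
	for (i = 0; i < len; i++)
		sum += p[i];
	return sum == 0;			/* "checksum makes sum 0" */
}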
#define MPC_SIGNATURE "PCMP"


@@ -18,11 +18,15 @@
#define _EFER_LME 8 /* Long mode enable */
#define _EFER_LMA 10 /* Long mode active (read-only) */
#define _EFER_NX 11 /* No execute enable */
#define _EFER_SVME 12 /* Enable virtualization */
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME)
#define EFER_LMA (1<<_EFER_LMA)
#define EFER_NX (1<<_EFER_NX)
#define EFER_SVME (1<<_EFER_SVME)
#define EFER_FFXSR (1<<_EFER_FFXSR)
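/*
 * Editorial example, not from this patch: the new bits are tested like
 * any other EFER flag, e.g. checking whether SVM was enabled (rdmsrl()
 * comes from <asm/msr.h>; svm_is_enabled() is a hypothetical helper).
 */
static inline int svm_is_enabled(void)
{
	u64 efer;

	rdmsrl(MSR_EFER, efer);
	return !!(efer & EFER_SVME);
}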
/* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_PERFCTR0 0x000000c1
@@ -360,4 +364,9 @@
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
#define MSR_VM_HSAVE_PA 0xc0010117
#endif /* _ASM_X86_MSR_INDEX_H */


@@ -4,8 +4,12 @@
extern int pxm_to_nid(int pxm);
extern void numa_remove_cpu(int cpu);
#ifdef CONFIG_NUMA
#ifdef CONFIG_HIGHMEM
extern void set_highmem_pages_init(void);
#else
static inline void set_highmem_pages_init(void)
{
}
#endif
#endif /* _ASM_X86_NUMA_32_H */


@@ -31,6 +31,8 @@
extern int found_numaq;
extern int get_memcfg_numaq(void);
extern void *xquad_portio;
/*
* SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
*/


@@ -1,142 +0,0 @@
#ifndef __ASM_NUMAQ_APIC_H
#define __ASM_NUMAQ_APIC_H
#include <asm/io.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
static inline const cpumask_t *target_cpus(void)
{
return &CPU_MASK_ALL;
}
#define NO_BALANCE_IRQ (1)
#define esr_disable (1)
#define INT_DELIVERY_MODE dest_LowestPrio
#define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
return physid_isset(apicid, bitmap);
}
static inline unsigned long check_apicid_present(int bit)
{
return physid_isset(bit, phys_cpu_present_map);
}
#define apicid_cluster(apicid) (apicid & 0xF0)
static inline int apic_id_registered(void)
{
return 1;
}
static inline void init_apic_ldr(void)
{
/* Already done in NUMA-Q firmware */
}
static inline void setup_apic_routing(void)
{
printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
"NUMA-Q", nr_ioapics);
}
/*
* Skip adding the timer int on secondary nodes, which causes
* a small but painful rift in the time-space continuum.
*/
static inline int multi_timer_check(int apic, int irq)
{
return apic != 0 && irq == 0;
}
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
/* We don't have a good way to do this yet - hack */
return physids_promote(0xFUL);
}
/* Mapping from cpu number to logical apicid */
extern u8 cpu_2_logical_apicid[];
static inline int cpu_to_logical_apicid(int cpu)
{
if (cpu >= nr_cpu_ids)
return BAD_APICID;
return (int)cpu_2_logical_apicid[cpu];
}
/*
* Supporting over 60 cpus on NUMA-Q requires a locality-dependent
* cpu to APIC ID relation to properly interact with the intelligent
* mode of the cluster controller.
*/
static inline int cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < 60)
return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
else
return BAD_APICID;
}
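/*
 * Editorial worked example (not in the original): mps_cpu 5 is local
 * CPU 5 & 0x3 = 1 on quad 5 >> 2 = 1, so its APIC ID is
 * (1 << 4) | (1 << 1) = 0x12.
 */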
static inline int apicid_to_node(int logical_apicid)
{
return logical_apicid >> 4;
}
static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
{
int node = apicid_to_node(logical_apicid);
int cpu = __ffs(logical_apicid & 0xf);
return physid_mask_of_physid(cpu + 4*node);
}
extern void *xquad_portio;
static inline void setup_portio_remap(void)
{
int num_quads = num_online_nodes();
if (num_quads <= 1)
return;
printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
(u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
}
static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
return (1);
}
static inline void enable_apic_mode(void)
{
}
/*
* We use physical apicids here, not logical, so just return the default
* physical broadcast to stop people from breaking us
*/
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
return (int) 0xF;
}
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{
return (int) 0xF;
}
/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}
#endif /* __ASM_NUMAQ_APIC_H */


@@ -1,14 +0,0 @@
#ifndef __ASM_NUMAQ_APICDEF_H
#define __ASM_NUMAQ_APICDEF_H
#define APIC_ID_MASK (0xF<<24)
static inline unsigned get_apic_id(unsigned long x)
{
return (((x)>>24)&0x0F);
}
#define GET_APIC_ID(x) get_apic_id(x)
#endif


@@ -1,22 +0,0 @@
#ifndef __ASM_NUMAQ_IPI_H
#define __ASM_NUMAQ_IPI_H
void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
send_IPI_mask_sequence(mask, vector);
}
static inline void send_IPI_allbutself(int vector)
{
send_IPI_mask_allbutself(cpu_online_mask, vector);
}
static inline void send_IPI_all(int vector)
{
send_IPI_mask(cpu_online_mask, vector);
}
#endif /* __ASM_NUMAQ_IPI_H */


@@ -1,6 +0,0 @@
#ifndef __ASM_NUMAQ_MPPARSE_H
#define __ASM_NUMAQ_MPPARSE_H
extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
#endif /* __ASM_NUMAQ_MPPARSE_H */


@@ -1,45 +0,0 @@
#ifndef __ASM_NUMAQ_WAKECPU_H
#define __ASM_NUMAQ_WAKECPU_H
/* This file copes with machines that wake up secondary CPUs by NMIs */
#define TRAMPOLINE_PHYS_LOW (0x8)
#define TRAMPOLINE_PHYS_HIGH (0xa)
/* We don't do anything here because we use NMI's to boot instead */
static inline void wait_for_init_deassert(atomic_t *deassert)
{
}
/*
* Because we use NMIs rather than the INIT-STARTUP sequence to
* bootstrap the CPUs, the APIC may be in a weird state. Kick it.
*/
static inline void smp_callin_clear_local_apic(void)
{
clear_local_APIC();
}
static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
{
printk("Storing NMI vector\n");
*high =
*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH));
*low =
*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW));
}
static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
{
printk("Restoring NMI vector\n");
*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
*high;
*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
*low;
}
static inline void inquire_remote_apic(int apicid)
{
}
#endif /* __ASM_NUMAQ_WAKECPU_H */


@@ -1,42 +1,11 @@
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#include <linux/types.h>
#ifdef __KERNEL__
#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
/* Cast PAGE_MASK to a signed type so that it is sign-extended if
virtual addresses are 32 bits but physical addresses are larger
(ie, 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE 2
#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif
#include <asm/page_types.h>
#ifdef CONFIG_X86_64
#include <asm/page_64.h>
@@ -44,38 +13,18 @@
#include <asm/page_32.h>
#endif /* CONFIG_X86_64 */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VM_DATA_DEFAULT_FLAGS \
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#ifndef __ASSEMBLY__
typedef struct { pgdval_t pgd; } pgd_t;
typedef struct { pgprotval_t pgprot; } pgprot_t;
extern int page_is_ram(unsigned long pagenr);
extern int devmem_is_allowed(unsigned long pagenr);
extern void map_devmem(unsigned long pfn, unsigned long size,
pgprot_t vma_prot);
extern void unmap_devmem(unsigned long pfn, unsigned long size,
pgprot_t vma_prot);
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;
struct page;
static inline void clear_user_page(void *page, unsigned long vaddr,
struct page *pg)
{
clear_page(page);
}
static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
struct page *topage)
{
copy_page(to, from);
}
@@ -84,99 +33,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
static inline pgd_t native_make_pgd(pgdval_t val)
{
return (pgd_t) { val };
}
static inline pgdval_t native_pgd_val(pgd_t pgd)
{
return pgd.pgd;
}
#if PAGETABLE_LEVELS >= 3
#if PAGETABLE_LEVELS == 4
typedef struct { pudval_t pud; } pud_t;
static inline pud_t native_make_pud(pudval_t val)
{
return (pud_t) { val };
}
static inline pudval_t native_pud_val(pud_t pud)
{
return pud.pud;
}
#else /* PAGETABLE_LEVELS == 3 */
#include <asm-generic/pgtable-nopud.h>
static inline pudval_t native_pud_val(pud_t pud)
{
return native_pgd_val(pud.pgd);
}
#endif /* PAGETABLE_LEVELS == 4 */
typedef struct { pmdval_t pmd; } pmd_t;
static inline pmd_t native_make_pmd(pmdval_t val)
{
return (pmd_t) { val };
}
static inline pmdval_t native_pmd_val(pmd_t pmd)
{
return pmd.pmd;
}
#else /* PAGETABLE_LEVELS == 2 */
#include <asm-generic/pgtable-nopmd.h>
static inline pmdval_t native_pmd_val(pmd_t pmd)
{
return native_pgd_val(pmd.pud.pgd);
}
#endif /* PAGETABLE_LEVELS >= 3 */
static inline pte_t native_make_pte(pteval_t val)
{
return (pte_t) { .pte = val };
}
static inline pteval_t native_pte_val(pte_t pte)
{
return pte.pte;
}
static inline pteval_t native_pte_flags(pte_t pte)
{
return native_pte_val(pte) & PTE_FLAGS_MASK;
}
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define pgd_val(x) native_pgd_val(x)
#define __pgd(x) native_make_pgd(x)
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x) native_pud_val(x)
#define __pud(x) native_make_pud(x)
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x) native_pmd_val(x)
#define __pmd(x) native_make_pmd(x)
#endif
#define pte_val(x) native_pte_val(x)
#define pte_flags(x) native_pte_flags(x)
#define __pte(x) native_make_pte(x)
#endif /* CONFIG_PARAVIRT */
#define __pa(x) __phys_addr((unsigned long)(x))
#define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))
/* __pa_symbol should be used for C visible symbols.


@@ -1,82 +1,14 @@
#ifndef _ASM_X86_PAGE_32_H
#define _ASM_X86_PAGE_32_H
/*
* This handles the memory map.
*
* A __PAGE_OFFSET of 0xC0000000 means that the kernel has
* a virtual address space of one gigabyte, which limits the
* amount of physical memory you can use to about 950MB.
*
* If you want more physical memory than this then see the CONFIG_HIGHMEM4G
* and CONFIG_HIGHMEM64G options in the kernel configuration.
*/
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#ifdef CONFIG_4KSTACKS
#define THREAD_ORDER 0
#else
#define THREAD_ORDER 1
#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define STACKFAULT_STACK 0
#define DOUBLEFAULT_STACK 1
#define NMI_STACK 0
#define DEBUG_STACK 0
#define MCE_STACK 0
#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
/* 44=32+12, the limit we can fit into an unsigned long pfn */
#define __PHYSICAL_MASK_SHIFT 44
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 3
#include <asm/page_32_types.h>
#ifndef __ASSEMBLY__
typedef u64 pteval_t;
typedef u64 pmdval_t;
typedef u64 pudval_t;
typedef u64 pgdval_t;
typedef u64 pgprotval_t;
typedef union {
struct {
unsigned long pte_low, pte_high;
};
pteval_t pte;
} pte_t;
#endif /* __ASSEMBLY__ */
#else /* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT 32
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 2
#ifndef __ASSEMBLY__
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef union {
pteval_t pte;
pteval_t pte_low;
} pte_t;
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_X86_PAE */
#ifndef __ASSEMBLY__
typedef struct page *pgtable_t;
#endif
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
#ifndef __ASSEMBLY__
#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
@@ -89,23 +21,6 @@ extern unsigned long __phys_addr(unsigned long);
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */
extern int nx_enabled;
/*
* This much address space is reserved for vmalloc() and iomap()
* as well as fixmap mappings.
*/
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;
extern void find_low_pfn_range(void);
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long, unsigned long);
extern void free_initmem(void);
extern void setup_bootmem_allocator(void);
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>


@@ -0,0 +1,60 @@
#ifndef _ASM_X86_PAGE_32_DEFS_H
#define _ASM_X86_PAGE_32_DEFS_H
#include <linux/const.h>
/*
* This handles the memory map.
*
* A __PAGE_OFFSET of 0xC0000000 means that the kernel has
* a virtual address space of one gigabyte, which limits the
* amount of physical memory you can use to about 950MB.
*
* If you want more physical memory than this then see the CONFIG_HIGHMEM4G
* and CONFIG_HIGHMEM64G options in the kernel configuration.
*/
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#ifdef CONFIG_4KSTACKS
#define THREAD_ORDER 0
#else
#define THREAD_ORDER 1
#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define STACKFAULT_STACK 0
#define DOUBLEFAULT_STACK 1
#define NMI_STACK 0
#define DEBUG_STACK 0
#define MCE_STACK 0
#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
/* 44=32+12, the limit we can fit into an unsigned long pfn */
#define __PHYSICAL_MASK_SHIFT 44
#define __VIRTUAL_MASK_SHIFT 32
#else /* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT 32
#define __VIRTUAL_MASK_SHIFT 32
#endif /* CONFIG_X86_PAE */
#ifndef __ASSEMBLY__
/*
* This much address space is reserved for vmalloc() and iomap()
* as well as fixmap mappings.
*/
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;
extern void find_low_pfn_range(void);
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long, unsigned long);
extern void free_initmem(void);
extern void setup_bootmem_allocator(void);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PAGE_32_DEFS_H */


@@ -1,105 +1,6 @@
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H
#define PAGETABLE_LEVELS 4
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE - 1))
#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
#define IRQSTACK_ORDER 2
#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
/*
* Set __PAGE_OFFSET to the most negative possible address +
* PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
* hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
* what Xen requires.
*/
#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000
/*
* Make sure kernel is aligned to 2MB address. Catching it at compile
* time is better. Change your config file and compile the kernel
* for a 2MB aligned address (CONFIG_PHYSICAL_START)
*/
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
#define __VIRTUAL_MASK_SHIFT 48
/*
* Kernel image size is limited to 512 MB (see level2_kernel_pgt in
* arch/x86/kernel/head_64.S), and it is mapped here:
*/
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
#ifndef __ASSEMBLY__
void clear_page(void *page);
void copy_page(void *to, void *from);
/* duplicate of the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;
extern unsigned long __phys_addr(unsigned long);
#define __phys_reloc_hide(x) (x)
/*
* These are used to make use of C type-checking..
*/
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef struct page *pgtable_t;
typedef struct { pteval_t pte; } pte_t;
#define vmemmap ((struct page *)VMEMMAP_START)
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
extern void free_initmem(void);
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) < max_pfn)
#endif
#include <asm/page_64_types.h>
#endif /* _ASM_X86_PAGE_64_H */


@@ -0,0 +1,89 @@
#ifndef _ASM_X86_PAGE_64_DEFS_H
#define _ASM_X86_PAGE_64_DEFS_H
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE - 1))
#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
#define IRQ_STACK_ORDER 2
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
/*
* Set __PAGE_OFFSET to the most negative possible address +
* PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
* hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
* what Xen requires.
*/
#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000
/*
* Make sure kernel is aligned to 2MB address. Catching it at compile
* time is better. Change your config file and compile the kernel
* for a 2MB aligned address (CONFIG_PHYSICAL_START)
*/
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
#define __VIRTUAL_MASK_SHIFT 48
/*
* Kernel image size is limited to 512 MB (see level2_kernel_pgt in
* arch/x86/kernel/head_64.S), and it is mapped here:
*/
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
#ifndef __ASSEMBLY__
void clear_page(void *page);
void copy_page(void *to, void *from);
/* duplicate of the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;
extern unsigned long __phys_addr(unsigned long);
#define __phys_reloc_hide(x) (x)
#define vmemmap ((struct page *)VMEMMAP_START)
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
extern void free_initmem(void);
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) < max_pfn)
#endif
#endif /* _ASM_X86_PAGE_64_DEFS_H */


@@ -0,0 +1,57 @@
#ifndef _ASM_X86_PAGE_DEFS_H
#define _ASM_X86_PAGE_DEFS_H
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
/* Cast PAGE_MASK to a signed type so that it is sign-extended if
virtual addresses are 32 bits but physical addresses are larger
(ie, 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
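/*
 * Editorial worked example (not in the original): under 32-bit PAE,
 * (signed long)PAGE_MASK is -4096, which widens to the 64-bit
 * phys_addr_t 0xfffffffffffff000 before the AND, so PHYSICAL_PAGE_MASK
 * becomes 0xffffffff000 and covers the full 44-bit physical range.
 */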
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE 2
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VM_DATA_DEFAULT_FLAGS \
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
#else
#include <asm/page_32_types.h>
#endif /* CONFIG_X86_64 */
#ifndef __ASSEMBLY__
struct pgprot;
extern int page_is_ram(unsigned long pagenr);
extern int devmem_is_allowed(unsigned long pagenr);
extern void map_devmem(unsigned long pfn, unsigned long size,
struct pgprot vma_prot);
extern void unmap_devmem(unsigned long pfn, unsigned long size,
struct pgprot vma_prot);
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PAGE_DEFS_H */


@@ -4,7 +4,7 @@
* para-virtualization: those hooks are defined here. */
#ifdef CONFIG_PARAVIRT
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/asm.h>
/* Bitmask of what can be clobbered: usually at least eax. */
@@ -12,21 +12,38 @@
#define CLBR_EAX (1 << 0)
#define CLBR_ECX (1 << 1)
#define CLBR_EDX (1 << 2)
#define CLBR_EDI (1 << 3)
#ifdef CONFIG_X86_64
#define CLBR_RSI (1 << 3)
#define CLBR_RDI (1 << 4)
#ifdef CONFIG_X86_32
/* CLBR_ANY should match all regs platform has. For i386, that's just it */
#define CLBR_ANY ((1 << 4) - 1)
#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH (0)
#else
#define CLBR_RAX CLBR_EAX
#define CLBR_RCX CLBR_ECX
#define CLBR_RDX CLBR_EDX
#define CLBR_RDI CLBR_EDI
#define CLBR_RSI (1 << 4)
#define CLBR_R8 (1 << 5)
#define CLBR_R9 (1 << 6)
#define CLBR_R10 (1 << 7)
#define CLBR_R11 (1 << 8)
#define CLBR_ANY ((1 << 9) - 1)
#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG (CLBR_RAX)
#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
#include <asm/desc_defs.h>
#else
/* CLBR_ANY should match all regs platform has. For i386, that's just it */
#define CLBR_ANY ((1 << 3) - 1)
#endif /* X86_64 */
#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
@@ -40,6 +57,14 @@ struct tss_struct;
struct mm_struct;
struct desc_struct;
/*
* Wrapper type for pointers to code which uses the non-standard
* calling convention. See PV_CALLEE_SAVE_REGS_THUNK below.
*/
struct paravirt_callee_save {
void *func;
};
/* general info */
struct pv_info {
unsigned int kernel_rpl;
@@ -189,11 +214,15 @@ struct pv_irq_ops {
* expected to use X86_EFLAGS_IF; all other bits
* returned from save_fl are undefined, and may be ignored by
* restore_fl.
*
* NOTE: callers of these functions expect the callee to preserve
* more registers than the standard C calling convention requires.
*/
unsigned long (*save_fl)(void);
void (*restore_fl)(unsigned long);
void (*irq_disable)(void);
void (*irq_enable)(void);
struct paravirt_callee_save save_fl;
struct paravirt_callee_save restore_fl;
struct paravirt_callee_save irq_disable;
struct paravirt_callee_save irq_enable;
void (*safe_halt)(void);
void (*halt)(void);
@@ -244,7 +273,8 @@ struct pv_mmu_ops {
void (*flush_tlb_user)(void);
void (*flush_tlb_kernel)(void);
void (*flush_tlb_single)(unsigned long addr);
void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
void (*flush_tlb_others)(const struct cpumask *cpus,
struct mm_struct *mm,
unsigned long va);
/* Hooks for allocating and freeing a pagetable top-level */
@@ -278,12 +308,11 @@ struct pv_mmu_ops {
void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
pteval_t (*pte_val)(pte_t);
pteval_t (*pte_flags)(pte_t);
pte_t (*make_pte)(pteval_t pte);
struct paravirt_callee_save pte_val;
struct paravirt_callee_save make_pte;
pgdval_t (*pgd_val)(pgd_t);
pgd_t (*make_pgd)(pgdval_t pgd);
struct paravirt_callee_save pgd_val;
struct paravirt_callee_save make_pgd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
@@ -298,12 +327,12 @@ struct pv_mmu_ops {
void (*set_pud)(pud_t *pudp, pud_t pudval);
pmdval_t (*pmd_val)(pmd_t);
pmd_t (*make_pmd)(pmdval_t pmd);
struct paravirt_callee_save pmd_val;
struct paravirt_callee_save make_pmd;
#if PAGETABLE_LEVELS == 4
pudval_t (*pud_val)(pud_t);
pud_t (*make_pud)(pudval_t pud);
struct paravirt_callee_save pud_val;
struct paravirt_callee_save make_pud;
void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
#endif /* PAGETABLE_LEVELS == 4 */
@@ -388,6 +417,8 @@ extern struct pv_lock_ops pv_lock_ops;
asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
const void *target, u16 tgt_clobbers,
@@ -479,25 +510,45 @@ int paravirt_disable_iospace(void);
* makes sure the incoming and outgoing types are always correct.
*/
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx
#define PVOP_VCALL_ARGS \
unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
"=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx
#else /* CONFIG_X86_64 */
#define PVOP_VCALL_ARGS \
unsigned long __edi = __edi, __esi = __esi, \
__edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
"=S" (__esi), "=d" (__edx), \
"=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
#endif
#endif /* CONFIG_X86_32 */
#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
@@ -505,10 +556,11 @@ int paravirt_disable_iospace(void);
#define PVOP_TEST_NULL(op) ((void)op)
#endif
#define __PVOP_CALL(rettype, op, pre, post, ...) \
#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
pre, post, ...) \
({ \
rettype __ret; \
PVOP_CALL_ARGS; \
PVOP_TEST_NULL(op); \
/* This is 32-bit specific, but is okay in 64-bit */ \
/* since this condition will never hold */ \
@@ -516,70 +568,113 @@ int paravirt_disable_iospace(void);
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
: PVOP_CALL_CLOBBERS \
: call_clbr \
: paravirt_type(op), \
paravirt_clobber(CLBR_ANY), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
: "memory", "cc" EXTRA_CLOBBERS); \
: "memory", "cc" extra_clbr); \
__ret = (rettype)((((u64)__edx) << 32) | __eax); \
} else { \
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
: PVOP_CALL_CLOBBERS \
: call_clbr \
: paravirt_type(op), \
paravirt_clobber(CLBR_ANY), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
: "memory", "cc" EXTRA_CLOBBERS); \
: "memory", "cc" extra_clbr); \
__ret = (rettype)__eax; \
} \
__ret; \
})
#define __PVOP_VCALL(op, pre, post, ...) \
#define __PVOP_CALL(rettype, op, pre, post, ...) \
____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
PVOP_CALLEE_CLOBBERS, , \
pre, post, ##__VA_ARGS__)
#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
({ \
PVOP_VCALL_ARGS; \
PVOP_TEST_NULL(op); \
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
: PVOP_VCALL_CLOBBERS \
: call_clbr \
: paravirt_type(op), \
paravirt_clobber(CLBR_ANY), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
: "memory", "cc" VEXTRA_CLOBBERS); \
: "memory", "cc" extra_clbr); \
})
#define __PVOP_VCALL(op, pre, post, ...) \
____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
VEXTRA_CLOBBERS, \
pre, post, ##__VA_ARGS__)
#define __PVOP_VCALLEESAVE(op, pre, post, ...) \
____PVOP_VCALL(op.func, CLBR_RET_REG, \
PVOP_VCALLEE_CLOBBERS, , \
pre, post, ##__VA_ARGS__)
#define PVOP_CALL0(rettype, op) \
__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op) \
__PVOP_VCALL(op, "", "")
#define PVOP_CALLEE0(rettype, op) \
__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op) \
__PVOP_VCALLEESAVE(op, "", "")
#define PVOP_CALL1(rettype, op, arg1) \
__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1) \
__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_CALLEE1(rettype, op, arg1) \
__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1) \
__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_CALL2(rettype, op, arg1, arg2) \
__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
"1" ((unsigned long)(arg2)))
__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2) \
__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
"1" ((unsigned long)(arg2)))
__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2))
#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2) \
__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2))
#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
"1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3) \
__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
"1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
__PVOP_CALL(rettype, op, \
"push %[_arg4];", "lea 4(%%esp),%%esp;", \
"0" ((u32)(arg1)), "1" ((u32)(arg2)), \
"2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
__PVOP_VCALL(op, \
"push %[_arg4];", "lea 4(%%esp),%%esp;", \
@@ -587,13 +682,13 @@ int paravirt_disable_iospace(void);
"2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
"1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
"3"((unsigned long)(arg4)))
__PVOP_CALL(rettype, op, "", "", \
PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
"1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
"3"((unsigned long)(arg4)))
__PVOP_VCALL(op, "", "", \
PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif
static inline int paravirt_enabled(void)
@@ -984,10 +1079,11 @@ static inline void __flush_tlb_single(unsigned long addr)
PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}
static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
static inline void flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
unsigned long va)
{
PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}
static inline int paravirt_pgd_alloc(struct mm_struct *mm)
@@ -1059,13 +1155,13 @@ static inline pte_t __pte(pteval_t val)
pteval_t ret;
if (sizeof(pteval_t) > sizeof(long))
ret = PVOP_CALL2(pteval_t,
pv_mmu_ops.make_pte,
val, (u64)val >> 32);
ret = PVOP_CALLEE2(pteval_t,
pv_mmu_ops.make_pte,
val, (u64)val >> 32);
else
ret = PVOP_CALL1(pteval_t,
pv_mmu_ops.make_pte,
val);
ret = PVOP_CALLEE1(pteval_t,
pv_mmu_ops.make_pte,
val);
return (pte_t) { .pte = ret };
}
@@ -1075,42 +1171,25 @@ static inline pteval_t pte_val(pte_t pte)
pteval_t ret;
if (sizeof(pteval_t) > sizeof(long))
ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
pte.pte, (u64)pte.pte >> 32);
ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
pte.pte, (u64)pte.pte >> 32);
else
ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
pte.pte);
ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
pte.pte);
return ret;
}
static inline pteval_t pte_flags(pte_t pte)
{
pteval_t ret;
if (sizeof(pteval_t) > sizeof(long))
ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
pte.pte, (u64)pte.pte >> 32);
else
ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
pte.pte);
#ifdef CONFIG_PARAVIRT_DEBUG
BUG_ON(ret & PTE_PFN_MASK);
#endif
return ret;
}
static inline pgd_t __pgd(pgdval_t val)
{
pgdval_t ret;
if (sizeof(pgdval_t) > sizeof(long))
ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
val, (u64)val >> 32);
ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
val, (u64)val >> 32);
else
ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
val);
ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
val);
return (pgd_t) { ret };
}
@@ -1120,11 +1199,11 @@ static inline pgdval_t pgd_val(pgd_t pgd)
pgdval_t ret;
if (sizeof(pgdval_t) > sizeof(long))
ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
pgd.pgd, (u64)pgd.pgd >> 32);
ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
pgd.pgd, (u64)pgd.pgd >> 32);
else
ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
pgd.pgd);
ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
pgd.pgd);
return ret;
}
@@ -1188,11 +1267,11 @@ static inline pmd_t __pmd(pmdval_t val)
pmdval_t ret;
if (sizeof(pmdval_t) > sizeof(long))
ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
val, (u64)val >> 32);
ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
val, (u64)val >> 32);
else
ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
val);
ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
val);
return (pmd_t) { ret };
}
@@ -1202,11 +1281,11 @@ static inline pmdval_t pmd_val(pmd_t pmd)
pmdval_t ret;
if (sizeof(pmdval_t) > sizeof(long))
ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
pmd.pmd, (u64)pmd.pmd >> 32);
ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
pmd.pmd, (u64)pmd.pmd >> 32);
else
ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
pmd.pmd);
ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
pmd.pmd);
return ret;
}
@@ -1228,11 +1307,11 @@ static inline pud_t __pud(pudval_t val)
pudval_t ret;
if (sizeof(pudval_t) > sizeof(long))
ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
val, (u64)val >> 32);
ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
val, (u64)val >> 32);
else
ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
val);
ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
val);
return (pud_t) { ret };
}
@@ -1242,11 +1321,11 @@ static inline pudval_t pud_val(pud_t pud)
pudval_t ret;
if (sizeof(pudval_t) > sizeof(long))
ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
pud.pud, (u64)pud.pud >> 32);
ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
pud.pud, (u64)pud.pud >> 32);
else
ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
pud.pud);
ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
pud.pud);
return ret;
}
@@ -1374,9 +1453,10 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
}
void _paravirt_nop(void);
#define paravirt_nop ((void *)_paravirt_nop)
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);
void paravirt_use_bytelocks(void);
#define paravirt_nop ((void *)_paravirt_nop)
#ifdef CONFIG_SMP
@@ -1426,12 +1506,37 @@ extern struct paravirt_patch_site __parainstructions[],
__parainstructions_end[];
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS \
"push %rcx;" \
"push %rdx;" \
"push %rsi;" \
"push %rdi;" \
"push %r8;" \
"push %r9;" \
"push %r10;" \
"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS \
"pop %r11;" \
"pop %r10;" \
"pop %r9;" \
"pop %r8;" \
"pop %rdi;" \
"pop %rsi;" \
"pop %rdx;" \
"pop %rcx;"
/* We save some registers, but not all of them; that would be too much. We
* clobber all caller-saved registers except the argument parameter */
#define PV_SAVE_REGS "pushq %%rdi;"
@@ -1441,52 +1546,76 @@ extern struct paravirt_patch_site __parainstructions[],
#define PV_FLAGS_ARG "D"
#endif
/*
* Generate a thunk around a function which saves all caller-save
* registers except for the return value. This allows C functions to
* be called from assembler code where fewer than normal registers are
* available. It may also help code generation around calls from C
* code if the common case doesn't use many registers.
*
* When a callee is wrapped in a thunk, the caller can assume that all
* arg regs and all scratch registers are preserved across the
* call. The return value in rax/eax will not be saved, even for void
* functions.
*/
#define PV_CALLEE_SAVE_REGS_THUNK(func) \
extern typeof(func) __raw_callee_save_##func; \
static void *__##func##__ __used = func; \
\
asm(".pushsection .text;" \
"__raw_callee_save_" #func ": " \
PV_SAVE_ALL_CALLER_REGS \
"call " #func ";" \
PV_RESTORE_ALL_CALLER_REGS \
"ret;" \
".popsection")
/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func) \
((struct paravirt_callee_save) { __raw_callee_save_##func })
/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func) \
((struct paravirt_callee_save) { func })
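/*
 * Editorial usage sketch, not part of this patch (demo_save_fl is a
 * hypothetical example): wrap an ordinary C routine so that assembler
 * callers may assume all argument and scratch registers survive the
 * call, then install it as a pv op.
 */
static unsigned long demo_save_fl(void)
{
	return native_save_fl();
}
PV_CALLEE_SAVE_REGS_THUNK(demo_save_fl);

/* pv_irq_ops.save_fl = PV_CALLEE_SAVE(demo_save_fl); */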
static inline unsigned long __raw_local_save_flags(void)
{
unsigned long f;
asm volatile(paravirt_alt(PV_SAVE_REGS
PARAVIRT_CALL
PV_RESTORE_REGS)
asm volatile(paravirt_alt(PARAVIRT_CALL)
: "=a"(f)
: paravirt_type(pv_irq_ops.save_fl),
paravirt_clobber(CLBR_EAX)
: "memory", "cc" PV_VEXTRA_CLOBBERS);
: "memory", "cc");
return f;
}
static inline void raw_local_irq_restore(unsigned long f)
{
asm volatile(paravirt_alt(PV_SAVE_REGS
PARAVIRT_CALL
PV_RESTORE_REGS)
asm volatile(paravirt_alt(PARAVIRT_CALL)
: "=a"(f)
: PV_FLAGS_ARG(f),
paravirt_type(pv_irq_ops.restore_fl),
paravirt_clobber(CLBR_EAX)
: "memory", "cc" PV_EXTRA_CLOBBERS);
: "memory", "cc");
}
static inline void raw_local_irq_disable(void)
{
asm volatile(paravirt_alt(PV_SAVE_REGS
PARAVIRT_CALL
PV_RESTORE_REGS)
asm volatile(paravirt_alt(PARAVIRT_CALL)
:
: paravirt_type(pv_irq_ops.irq_disable),
paravirt_clobber(CLBR_EAX)
: "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
: "memory", "eax", "cc");
}
static inline void raw_local_irq_enable(void)
{
asm volatile(paravirt_alt(PV_SAVE_REGS
PARAVIRT_CALL
PV_RESTORE_REGS)
asm volatile(paravirt_alt(PARAVIRT_CALL)
:
: paravirt_type(pv_irq_ops.irq_enable),
paravirt_clobber(CLBR_EAX)
: "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
: "memory", "eax", "cc");
}
static inline unsigned long __raw_local_irq_save(void)
@@ -1529,33 +1658,49 @@ static inline unsigned long __raw_local_irq_save(void)
.popsection
#define COND_PUSH(set, mask, reg) \
.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg) \
.if ((~(set)) & mask); pop %reg; .endif
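/*
 * Editorial note (not in the original): a register is pushed/popped
 * only when its CLBR_* bit is clear in "set" -- registers the site
 * declares clobberable are skipped, everything else is preserved
 * around the indirect call.
 */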
#ifdef CONFIG_X86_64
#define PV_SAVE_REGS \
push %rax; \
push %rcx; \
push %rdx; \
push %rsi; \
push %rdi; \
push %r8; \
push %r9; \
push %r10; \
push %r11
#define PV_RESTORE_REGS \
pop %r11; \
pop %r10; \
pop %r9; \
pop %r8; \
pop %rdi; \
pop %rsi; \
pop %rdx; \
pop %rcx; \
pop %rax
#define PV_SAVE_REGS(set) \
COND_PUSH(set, CLBR_RAX, rax); \
COND_PUSH(set, CLBR_RCX, rcx); \
COND_PUSH(set, CLBR_RDX, rdx); \
COND_PUSH(set, CLBR_RSI, rsi); \
COND_PUSH(set, CLBR_RDI, rdi); \
COND_PUSH(set, CLBR_R8, r8); \
COND_PUSH(set, CLBR_R9, r9); \
COND_PUSH(set, CLBR_R10, r10); \
COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set) \
COND_POP(set, CLBR_R11, r11); \
COND_POP(set, CLBR_R10, r10); \
COND_POP(set, CLBR_R9, r9); \
COND_POP(set, CLBR_R8, r8); \
COND_POP(set, CLBR_RDI, rdi); \
COND_POP(set, CLBR_RSI, rsi); \
COND_POP(set, CLBR_RDX, rdx); \
COND_POP(set, CLBR_RCX, rcx); \
COND_POP(set, CLBR_RAX, rax)
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr) *addr(%rip)
#else
#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PV_SAVE_REGS(set) \
COND_PUSH(set, CLBR_EAX, eax); \
COND_PUSH(set, CLBR_EDI, edi); \
COND_PUSH(set, CLBR_ECX, ecx); \
COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set) \
COND_POP(set, CLBR_EDX, edx); \
COND_POP(set, CLBR_ECX, ecx); \
COND_POP(set, CLBR_EDI, edi); \
COND_POP(set, CLBR_EAX, eax)
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr) *%cs:addr
@@ -1567,15 +1712,15 @@ static inline unsigned long __raw_local_irq_save(void)
#define DISABLE_INTERRUPTS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
PV_SAVE_REGS; \
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
PV_RESTORE_REGS;) \
PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#define ENABLE_INTERRUPTS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
PV_SAVE_REGS; \
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
PV_RESTORE_REGS;)
PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#define USERGS_SYSRET32 \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
@@ -1605,11 +1750,15 @@ static inline unsigned long __raw_local_irq_save(void)
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
swapgs)
/*
* Note: swapgs is very special, and in practice is either going to be
* implemented with a single "swapgs" instruction or something very
* special. Either way, we don't need to save any registers for
* it.
*/
#define SWAPGS \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
PV_SAVE_REGS; \
call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
PV_RESTORE_REGS \
call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
)
#define GET_CR2_INTO_RCX \


@@ -5,10 +5,8 @@
#ifdef CONFIG_X86_PAT
extern int pat_enabled;
extern void validate_pat_support(struct cpuinfo_x86 *c);
#else
static const int pat_enabled;
static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
#endif
extern void pat_init(void);
@@ -17,6 +15,7 @@ extern int reserve_memtype(u64 start, u64 end,
unsigned long req_type, unsigned long *ret_type);
extern int free_memtype(u64 start, u64 end);
extern void pat_disable(char *reason);
extern int kernel_map_sync_memtype(u64 base, unsigned long size,
unsigned long flag);
#endif /* _ASM_X86_PAT_H */


@@ -1,137 +0,0 @@
#ifndef _ASM_X86_PDA_H
#define _ASM_X86_PDA_H
#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <asm/page.h>
/* Per processor datastructure. %gs points to it while the kernel runs */
struct x8664_pda {
struct task_struct *pcurrent; /* 0 Current process */
unsigned long data_offset; /* 8 Per cpu data offset from linker
address */
unsigned long kernelstack; /* 16 top of kernel stack for current */
unsigned long oldrsp; /* 24 user rsp for system call */
int irqcount; /* 32 Irq nesting counter. Starts -1 */
unsigned int cpunumber; /* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
unsigned long stack_canary; /* 40 stack canary value */
/* gcc-ABI: this canary MUST be at
offset 40!!! */
#endif
char *irqstackptr;
short nodenumber; /* number of current node (32k max) */
short in_bootmem; /* pda lives in bootmem */
unsigned int __softirq_pending;
unsigned int __nmi_count; /* number of NMI on this CPUs */
short mmu_state;
short isidle;
struct mm_struct *active_mm;
unsigned apic_timer_irqs;
unsigned irq0_irqs;
unsigned irq_resched_count;
unsigned irq_call_count;
unsigned irq_tlb_count;
unsigned irq_thermal_count;
unsigned irq_threshold_count;
unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;
extern struct x8664_pda **_cpu_pda;
extern void pda_init(int);
#define cpu_pda(i) (_cpu_pda[i])
/*
* There is no fast way to get the base address of the PDA; all the accesses
* have to mention %fs/%gs. So it needs to be done this Torvaldian way.
*/
extern void __bad_pda_field(void) __attribute__((noreturn));
/*
* proxy_pda doesn't actually exist, but tell gcc it is accessed for
* all PDA accesses so it gets read/write dependencies right.
*/
extern struct x8664_pda _proxy_pda;
#define pda_offset(field) offsetof(struct x8664_pda, field)
#define pda_to_op(op, field, val) \
do { \
typedef typeof(_proxy_pda.field) T__; \
if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
switch (sizeof(_proxy_pda.field)) { \
case 2: \
asm(op "w %1,%%gs:%c2" : \
"+m" (_proxy_pda.field) : \
"ri" ((T__)val), \
"i"(pda_offset(field))); \
break; \
case 4: \
asm(op "l %1,%%gs:%c2" : \
"+m" (_proxy_pda.field) : \
"ri" ((T__)val), \
"i" (pda_offset(field))); \
break; \
case 8: \
asm(op "q %1,%%gs:%c2": \
"+m" (_proxy_pda.field) : \
"ri" ((T__)val), \
"i"(pda_offset(field))); \
break; \
default: \
__bad_pda_field(); \
} \
} while (0)
#define pda_from_op(op, field) \
({ \
typeof(_proxy_pda.field) ret__; \
switch (sizeof(_proxy_pda.field)) { \
case 2: \
asm(op "w %%gs:%c1,%0" : \
"=r" (ret__) : \
"i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
break; \
case 4: \
asm(op "l %%gs:%c1,%0": \
"=r" (ret__): \
"i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
break; \
case 8: \
asm(op "q %%gs:%c1,%0": \
"=r" (ret__) : \
"i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
break; \
default: \
__bad_pda_field(); \
} \
ret__; \
})
#define read_pda(field) pda_from_op("mov", field)
#define write_pda(field, val) pda_to_op("mov", field, val)
#define add_pda(field, val) pda_to_op("add", field, val)
#define sub_pda(field, val) pda_to_op("sub", field, val)
#define or_pda(field, val) pda_to_op("or", field, val)
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit, field) \
({ \
int old__; \
asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
: "=r" (old__), "+m" (_proxy_pda.field) \
: "dIr" (bit), "i" (pda_offset(field)) : "memory");\
old__; \
})
#endif
#define PDA_STACKOFFSET (5*8)
#endif /* _ASM_X86_PDA_H */


@@ -2,53 +2,12 @@
#define _ASM_X86_PERCPU_H
#ifdef CONFIG_X86_64
#include <linux/compiler.h>
/* Same as asm-generic/percpu.h, except that we store the per cpu offset
in the PDA. Longer term the PDA and every per cpu variable
should be just put into a single section and referenced directly
from %gs */
#ifdef CONFIG_SMP
#include <asm/pda.h>
#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
#define __my_cpu_offset read_pda(data_offset)
#define per_cpu_offset(x) (__per_cpu_offset(x))
#define __percpu_seg gs
#define __percpu_mov_op movq
#else
#define __percpu_seg fs
#define __percpu_mov_op movl
#endif
#include <asm-generic/percpu.h>
DECLARE_PER_CPU(struct x8664_pda, pda);
/*
* These are supposed to be implemented as a single instruction which
* operates on the per-cpu data base segment. x86-64 doesn't have
* that yet, so this is a fairly inefficient workaround for the
* meantime. The single instruction is atomic with respect to
* preemption and interrupts, so we need to explicitly disable
* interrupts here to achieve the same effect. However, because it
* can be used from within interrupt-disable/enable, we can't actually
* disable interrupts; disabling preemption is enough.
*/
#define x86_read_percpu(var) \
({ \
typeof(per_cpu_var(var)) __tmp; \
preempt_disable(); \
__tmp = __get_cpu_var(var); \
preempt_enable(); \
__tmp; \
})
#define x86_write_percpu(var, val) \
do { \
preempt_disable(); \
__get_cpu_var(var) = (val); \
preempt_enable(); \
} while(0)
#else /* CONFIG_X86_64 */
#ifdef __ASSEMBLY__
@@ -65,47 +24,48 @@ DECLARE_PER_CPU(struct x8664_pda, pda);
* PER_CPU(cpu_gdt_descr, %ebx)
*/
#ifdef CONFIG_SMP
#define PER_CPU(var, reg) \
movl %fs:per_cpu__##this_cpu_off, reg; \
#define PER_CPU(var, reg) \
__percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
lea per_cpu__##var(reg), reg
#define PER_CPU_VAR(var) %fs:per_cpu__##var
#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var
#else /* ! SMP */
#define PER_CPU(var, reg) \
movl $per_cpu__##var, reg
#define PER_CPU(var, reg) \
__percpu_mov_op $per_cpu__##var, reg
#define PER_CPU_VAR(var) per_cpu__##var
#endif /* SMP */
#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var) per_cpu__##var
#endif
#else /* ...!ASSEMBLY */
/*
* PER_CPU finds an address of a per-cpu variable.
*
* Args:
* var - variable name
* cpu - 32bit register containing the current CPU number
*
* The resulting address is stored in the "cpu" argument.
*
* Example:
* PER_CPU(cpu_gdt_descr, %ebx)
*/
#include <linux/stringify.h>
#ifdef CONFIG_SMP
#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset percpu_read(this_cpu_off)
#else
#define __percpu_arg(x) "%" #x
#endif
#define __my_cpu_offset x86_read_percpu(this_cpu_off)
/*
* Initialized pointers to per-cpu variables needed for the boot
* processor need to use these macros to get the proper address
* offset from __per_cpu_load on SMP.
*
* There also must be an entry in vmlinux_64.lds.S
*/
#define DECLARE_INIT_PER_CPU(var) \
extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
#define __percpu_seg "%%fs:"
#else /* !SMP */
#define __percpu_seg ""
#endif /* SMP */
#include <asm-generic/percpu.h>
/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);
#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var) init_per_cpu__##var
#else
#define init_per_cpu_var(var) per_cpu_var(var)
#endif
/* For arch-specific code, we can use direct single-insn ops (they
* don't give an lvalue though). */
@@ -120,20 +80,25 @@ do { \
} \
switch (sizeof(var)) { \
case 1: \
asm(op "b %1,"__percpu_seg"%0" \
asm(op "b %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
case 2: \
asm(op "w %1,"__percpu_seg"%0" \
asm(op "w %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
case 4: \
asm(op "l %1,"__percpu_seg"%0" \
asm(op "l %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
case 8: \
asm(op "q %1,"__percpu_arg(0) \
: "+m" (var) \
: "re" ((T__)val)); \
break; \
default: __bad_percpu_size(); \
} \
} while (0)
@@ -143,17 +108,22 @@ do { \
typeof(var) ret__; \
switch (sizeof(var)) { \
case 1: \
asm(op "b "__percpu_seg"%1,%0" \
asm(op "b "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 2: \
asm(op "w "__percpu_seg"%1,%0" \
asm(op "w "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 4: \
asm(op "l "__percpu_seg"%1,%0" \
asm(op "l "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 8: \
asm(op "q "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
@@ -162,13 +132,30 @@ do { \
ret__; \
})
#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
#define percpu_read(var) percpu_from_op("mov", per_cpu__##var)
#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val)
#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
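/*
 * Editorial usage sketch, not from this patch (demo_counter is a
 * hypothetical variable): each op compiles to one %fs/%gs-relative
 * instruction, so it is preemption-safe on the local CPU.
 *
 *	DEFINE_PER_CPU(unsigned long, demo_counter);
 *
 *	percpu_add(demo_counter, 1);
 *	x = percpu_read(demo_counter);
 */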
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var) \
({ \
int old__; \
asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
: "=r" (old__), "+m" (per_cpu__##var) \
: "dIr" (bit)); \
old__; \
})
#include <asm-generic/percpu.h>
/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);
#endif /* !__ASSEMBLY__ */
#endif /* !CONFIG_X86_64 */
#ifdef CONFIG_SMP
@@ -195,9 +182,9 @@ do { \
#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu) \
(early_per_cpu_ptr(_name) ? \
early_per_cpu_ptr(_name)[_cpu] : \
per_cpu(_name, _cpu))
*(early_per_cpu_ptr(_name) ? \
&early_per_cpu_ptr(_name)[_cpu] : \
&per_cpu(_name, _cpu))
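The rewrite from a ternary over values to a dereferenced ternary over addresses makes the macro usable as an lvalue, so callers can assign through it. A sketch, assuming the x86_cpu_to_apicid early map:

early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;   /* store: now legal */
apicid = early_per_cpu(x86_cpu_to_apicid, cpu);   /* load, as before */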
#else /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \

View File

@@ -53,8 +53,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
#define pte_none(x) (!(x).pte_low)
/*
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
* split up the 29 bits of offset into this range:

View File

@@ -1,7 +1,23 @@
#ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H
#define _ASM_X86_PGTABLE_2LEVEL_DEFS_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef union {
pteval_t pte;
pteval_t pte_low;
} pte_t;
#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 0
#define PAGETABLE_LEVELS 2
/*
* traditional i386 two-level paging structure:
@@ -10,6 +26,7 @@
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024
/*
* the i386 is two-level, so we don't really have any
* PMD directory physically.

View File

@@ -18,21 +18,6 @@
printk("%s:%d: bad pgd %p(%016Lx).\n", \
__FILE__, __LINE__, &(e), pgd_val(e))
static inline int pud_none(pud_t pud)
{
return pud_val(pud) == 0;
}
static inline int pud_bad(pud_t pud)
{
return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
}
static inline int pud_present(pud_t pud)
{
return pud_val(pud) & _PAGE_PRESENT;
}
/* Rules for using set_pte: the pte being assigned *must* be
* either not present or in a state where the hardware will
* not attempt to update the pte. In places where this is
@@ -120,15 +105,6 @@ static inline void pud_clear(pud_t *pudp)
write_cr3(pgd);
}
#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
pmd_index(address))
#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
@@ -145,17 +121,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}
static inline int pte_none(pte_t pte)
{
return !pte.pte_low && !pte.pte_high;
}
/*
* Bits 0, 6 and 7 are taken in the low part of the pte,
* put the 32 bits of offset into the high part.

View File

@@ -1,12 +1,31 @@
#ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H
#define _ASM_X86_PGTABLE_3LEVEL_DEFS_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
typedef u64 pteval_t;
typedef u64 pmdval_t;
typedef u64 pudval_t;
typedef u64 pgdval_t;
typedef u64 pgprotval_t;
typedef union {
struct {
unsigned long pte_low, pte_high;
};
pteval_t pte;
} pte_t;
#endif /* !__ASSEMBLY__ */
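The union gives PAE code two views of the same 64-bit entry: one pteval_t, or a pair of 32-bit halves that a 32-bit CPU can read and write separately. A minimal sketch of the equivalence (x86 is little-endian, so pte_low holds bits 0..31):

static inline u64 pte_raw(pte_t p)
{
        /* same storage either way: equals p.pte */
        return ((u64)p.pte_high << 32) | p.pte_low;
}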
#ifdef CONFIG_PARAVIRT
#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
#else
#define SHARED_KERNEL_PMD 1
#endif
#define PAGETABLE_LEVELS 3
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
*/
@@ -25,4 +44,5 @@
*/
#define PTRS_PER_PTE 512
#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */

View File

@@ -1,164 +1,9 @@
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H
#define FIRST_USER_ADDRESS 0
#include <asm/page.h>
#define _PAGE_BIT_PRESENT 0 /* is present */
#define _PAGE_BIT_RW 1 /* writeable */
#define _PAGE_BIT_USER 2 /* userspace addressable */
#define _PAGE_BIT_PWT 3 /* page write through */
#define _PAGE_BIT_PCD 4 /* page cache disabled */
#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT 7 /* on 4KB pages */
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
#define _PAGE_BIT_UNUSED3 11
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY
#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX (_AT(pteval_t, 0))
#endif
#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
_PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
_PAGE_DIRTY)
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB (0)
#define _PAGE_CACHE_WC (_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
_PAGE_ACCESSED | _PAGE_NX)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
_PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_ACCESSED)
#define PAGE_COPY PAGE_COPY_NOEXEC
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_ACCESSED)
#define __PAGE_KERNEL_EXEC \
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
#define __PAGE_KERNEL_IO (__PAGE_KERNEL | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC | _PAGE_IOMAP)
#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
#define PAGE_KERNEL_IO_UC_MINUS __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
#define PAGE_KERNEL_IO_WC __pgprot(__PAGE_KERNEL_IO_WC)
/* xwr */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
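The __P table covers private (copy-on-write) mappings and the __S table shared ones; the three digits are the x/w/r permission bits. Core mm indexes an array built from these entries by the low vm_flags bits, roughly as below (a sketch of the consumer, not code from this diff):

pgprot_t prot = protection_map[vm_flags &
                (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];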
/*
* early identity mapping pte attrib macros.
*/
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
#else
/*
* For PDE_IDENT_ATTR, include the USER bit. Since the PDE and PTE
* protection bits are combined, this allows user space to access the
* high-address-mapped VDSO in the presence of CONFIG_COMPAT_VDSO.
*/
#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
#endif
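The hex values follow directly from the _PAGE_BIT_* table earlier in this file:

/*
 *   PTE_IDENT_ATTR 0x003 = PRESENT(1<<0) | RW(1<<1)
 *   PDE_IDENT_ATTR 0x067 = PRESENT(1<<0) | RW(1<<1) | USER(1<<2)
 *                        | ACCESSED(1<<5) | DIRTY(1<<6)
 *   PGD_IDENT_ATTR 0x001 = PRESENT(1<<0)
 */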
#include <asm/pgtable_types.h>
/*
* Macro to mark a page protection value as UC-
@@ -170,9 +15,6 @@
#ifndef __ASSEMBLY__
#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -183,6 +25,66 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte) native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pte_present(mm, addr, ptep, pte) \
native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte) \
native_set_pte_atomic(ptep, pte)
#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd) native_pgd_clear(pgd)
#endif
#ifndef set_pud
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud) native_pud_clear(pud)
#endif
#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd) native_pmd_clear(pmd)
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
native_pagetable_setup_start(base);
}
static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
native_pagetable_setup_done(base);
}
#define pgd_val(x) native_pgd_val(x)
#define __pgd(x) native_make_pgd(x)
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x) native_pud_val(x)
#define __pud(x) native_make_pud(x)
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x) native_pmd_val(x)
#define __pmd(x) native_make_pmd(x)
#endif
#define pte_val(x) native_pte_val(x)
#define __pte(x) native_make_pte(x)
#endif /* CONFIG_PARAVIRT */
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
@@ -236,72 +138,84 @@ static inline unsigned long pte_pfn(pte_t pte)
static inline int pmd_large(pmd_t pte)
{
return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
(_PAGE_PSE | _PAGE_PRESENT);
}
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
pteval_t v = native_pte_val(pte);
return native_make_pte(v | set);
}
static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
pteval_t v = native_pte_val(pte);
return native_make_pte(v & ~clear);
}
static inline pte_t pte_mkclean(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_DIRTY);
return pte_clear_flags(pte, _PAGE_DIRTY);
}
static inline pte_t pte_mkold(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
return pte_clear_flags(pte, _PAGE_ACCESSED);
}
static inline pte_t pte_wrprotect(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_RW);
return pte_clear_flags(pte, _PAGE_RW);
}
static inline pte_t pte_mkexec(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_NX);
return pte_clear_flags(pte, _PAGE_NX);
}
static inline pte_t pte_mkdirty(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_DIRTY);
return pte_set_flags(pte, _PAGE_DIRTY);
}
static inline pte_t pte_mkyoung(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_ACCESSED);
return pte_set_flags(pte, _PAGE_ACCESSED);
}
static inline pte_t pte_mkwrite(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_RW);
return pte_set_flags(pte, _PAGE_RW);
}
static inline pte_t pte_mkhuge(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_PSE);
return pte_set_flags(pte, _PAGE_PSE);
}
static inline pte_t pte_clrhuge(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_PSE);
return pte_clear_flags(pte, _PAGE_PSE);
}
static inline pte_t pte_mkglobal(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_GLOBAL);
return pte_set_flags(pte, _PAGE_GLOBAL);
}
static inline pte_t pte_clrglobal(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
return pte_clear_flags(pte, _PAGE_GLOBAL);
}
static inline pte_t pte_mkspecial(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_SPECIAL);
return pte_set_flags(pte, _PAGE_SPECIAL);
}
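All of the pte_mk*/pte_clr* modifiers now funnel through pte_set_flags()/pte_clear_flags(), which flip bits on the native value instead of bouncing through the pte_val()/__pte() pair, which is paravirt-hooked under CONFIG_PARAVIRT (see above). The helpers also compose, e.g. (sketch):

pte = pte_mkwrite(pte_mkdirty(pte));    /* sets _PAGE_DIRTY | _PAGE_RW */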
extern pteval_t __supported_pte_mask;
/*
* Mask out unsupported bits in a present pgprot. Non-present pgprots
* can use those bits for other purposes, so leave them be.
@@ -374,75 +288,8 @@ static inline int is_new_memtype_allowed(unsigned long flags,
return 1;
}
#ifndef __ASSEMBLY__
/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t *vma_prot);
#endif
/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);
#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif
struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte) native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pte_present(mm, addr, ptep, pte) \
native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte) \
native_set_pte_atomic(ptep, pte)
#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd) native_pgd_clear(pgd)
#endif
#ifndef set_pud
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud) native_pud_clear(pud)
#endif
#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd) native_pmd_clear(pmd)
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
native_pagetable_setup_start(base);
}
static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
native_pagetable_setup_done(base);
}
#endif /* CONFIG_PARAVIRT */
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_X86_32
@@ -451,6 +298,188 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
# include "pgtable_64.h"
#endif
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
static inline int pte_none(pte_t pte)
{
return !pte.pte;
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
return a.pte == b.pte;
}
static inline int pte_present(pte_t a)
{
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
static inline int pmd_present(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_PRESENT;
}
static inline int pmd_none(pmd_t pmd)
{
/* Only check low word on 32-bit platforms, since it might be
out of sync with upper half. */
return (unsigned long)native_pmd_val(pmd) == 0;
}
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}
/*
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
/*
* the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
*
* this function returns the index of the entry in the pmd page which would
* control the given virtual address
*/
static inline unsigned pmd_index(unsigned long address)
{
return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
* (Currently stuck as a macro because of indirect forward reference
* to linux/mm.h:page_to_nid())
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
/*
* the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
*
* this function returns the index of the entry in the pte page which would
* control the given virtual address
*/
static inline unsigned pte_index(unsigned long address)
{
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
static inline int pmd_bad(pmd_t pmd)
{
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
static inline unsigned long pages_to_mb(unsigned long npg)
{
return npg >> (20 - PAGE_SHIFT);
}
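With 4 KiB pages (PAGE_SHIFT == 12) the shift is 20 - 12 = 8, so one megabyte is 256 pages; a worked check:

/* pages_to_mb(262144) == 262144 >> 8 == 1024 (MB) */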
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
return native_pud_val(pud) == 0;
}
static inline int pud_present(pud_t pud)
{
return pud_flags(pud) & _PAGE_PRESENT;
}
static inline unsigned long pud_page_vaddr(pud_t pud)
{
return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}
/*
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}
static inline unsigned long pmd_pfn(pmd_t pmd)
{
return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
static inline int pud_large(pud_t pud)
{
return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
(_PAGE_PSE | _PAGE_PRESENT);
}
static inline int pud_bad(pud_t pud)
{
return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
return 0;
}
#endif /* PAGETABLE_LEVELS > 2 */
#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
return pgd_flags(pgd) & _PAGE_PRESENT;
}
static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}
/*
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
/* to find an entry in a page-table-directory. */
static inline unsigned pud_index(unsigned long address)
{
return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}
static inline int pgd_bad(pgd_t pgd)
{
return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
static inline int pgd_none(pgd_t pgd)
{
return !native_pgd_val(pgd);
}
#endif /* PAGETABLE_LEVELS > 3 */
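Taken together, the level helpers support a software page-table walk. An illustrative 4-level descent built only from functions defined above (the caller supplies the pgd entry, e.g. from a pgd_offset() lookup done elsewhere; large-page entries and locking are deliberately not handled):

static pte_t *walk_to_pte(pgd_t *pgd, unsigned long address)
{
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
        return pte_offset_kernel(pmd, address);
}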
#endif /* __ASSEMBLY__ */
/*
* the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
*
@@ -476,28 +505,6 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
#ifndef __ASSEMBLY__
enum {
PG_LEVEL_NONE,
PG_LEVEL_4K,
PG_LEVEL_2M,
PG_LEVEL_1G,
PG_LEVEL_NUM
};
#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif
/*
* Helper function that returns the kernel pagetable entry controlling
* the virtual address 'address'. NULL means no pagetable entry present.
* NOTE: the return type is pte_t *, but if the pmd maps a large (PSE)
* page then the pmd entry is returned as if it were a pte too.
*/
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
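A typical call pattern, as a sketch: probe how an address is currently mapped and check the reported level against the PG_LEVEL_* enum above (function name illustrative):

static bool kernel_mapping_is_4k(unsigned long address)
{
        unsigned int level;
        pte_t *ptep = lookup_address(address, &level);

        return ptep && pte_present(*ptep) && level == PG_LEVEL_4K;
}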
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
