Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-cpumask into merge-rr-cpumask
Conflicts:
	arch/x86/kernel/io_apic.c
	kernel/rcuclassic.c
	kernel/sched.c
	kernel/time/tick-sched.c

Signed-off-by: Mike Travis <travis@sgi.com>
[ mingo@elte.hu: backmerged typo fix for io_apic.c ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -168,7 +168,15 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
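The new change_bit() splits on whether nr is known at compile time: for a constant nr the target bit lives in a known byte under a known mask, so a short lock xorb with an immediate replaces the bulkier lock btc. A minimal C sketch of the constant-folding idea, assuming IS_IMMEDIATE wraps __builtin_constant_p and CONST_MASK/CONST_MASK_ADDR pick the mask and byte (the sketch omits the lock prefix, so unlike the real code it is not atomic):

/* sketch only: non-atomic illustration of the IS_IMMEDIATE fast path */
static inline void sketch_change_bit(int nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		/* constant nr: byte nr/8 of the bitmap, mask 1 << (nr % 8);
		 * this is what the "iq" xorb operand encodes as an imm8 */
		volatile unsigned char *byte =
			(volatile unsigned char *)addr + (nr >> 3);
		*byte ^= 1 << (nr & 7);
	} else {
		/* variable nr: word-sized toggle (the real code uses a
		 * lock-prefixed btc instruction here instead) */
		addr[nr / BITS_PER_LONG] ^= 1UL << (nr % BITS_PER_LONG);
	}
}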
@@ -9,7 +9,7 @@
#ifdef CONFIG_X86_32
# define __BUG_C0 "2:\t.long 1b, %c0\n"
#else
# define __BUG_C0 "2:\t.quad 1b, %c0\n"
# define __BUG_C0 "2:\t.long 1b - 2b, %c0 - 2b\n"
#endif

#define BUG() \
@@ -4,26 +4,33 @@
#include <asm/types.h>
#include <linux/compiler.h>

#ifdef __GNUC__
#define __LITTLE_ENDIAN

#ifdef __i386__

static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
{
#ifdef CONFIG_X86_BSWAP
	asm("bswap %0" : "=r" (x) : "0" (x));
#else
#ifdef __i386__
# ifdef CONFIG_X86_BSWAP
	asm("bswap %0" : "=r" (val) : "0" (val));
# else
	asm("xchgb %b0,%h0\n\t" /* swap lower bytes */
	    "rorl $16,%0\n\t" /* swap words */
	    "xchgb %b0,%h0" /* swap higher bytes */
	    : "=q" (x)
	    : "0" (x));
#endif
	return x;
}
	    : "=q" (val)
	    : "0" (val));
# endif

static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
#else /* __i386__ */
	asm("bswapl %0"
	    : "=r" (val)
	    : "0" (val));
#endif
	return val;
}
#define __arch_swab32 __arch_swab32

static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
{
#ifdef __i386__
	union {
		struct {
			__u32 a;
@@ -32,50 +39,27 @@ static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
		__u64 u;
	} v;
	v.u = val;
#ifdef CONFIG_X86_BSWAP
# ifdef CONFIG_X86_BSWAP
	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
	    : "=r" (v.s.a), "=r" (v.s.b)
	    : "0" (v.s.a), "1" (v.s.b));
#else
	v.s.a = ___arch__swab32(v.s.a);
	v.s.b = ___arch__swab32(v.s.b);
# else
	v.s.a = __arch_swab32(v.s.a);
	v.s.b = __arch_swab32(v.s.b);
	asm("xchgl %0,%1"
	    : "=r" (v.s.a), "=r" (v.s.b)
	    : "0" (v.s.a), "1" (v.s.b));
#endif
# endif
	return v.u;
}

#else /* __i386__ */

static inline __attribute_const__ __u64 ___arch__swab64(__u64 x)
{
	asm("bswapq %0"
	    : "=r" (x)
	    : "0" (x));
	return x;
}

static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
	asm("bswapl %0"
	    : "=r" (x)
	    : "0" (x));
	return x;
}

	    : "=r" (val)
	    : "0" (val));
	return val;
#endif
}
#define __arch_swab64 __arch_swab64

/* Do not define swab16. Gcc is smart enough to recognize "C" version and
   convert it into rotation or exhange. */

#define __arch__swab64(x) ___arch__swab64(x)
#define __arch__swab32(x) ___arch__swab32(x)

#define __BYTEORDER_HAS_U64__

#endif /* __GNUC__ */

#include <linux/byteorder/little_endian.h>
#include <linux/byteorder.h>

#endif /* _ASM_X86_BYTEORDER_H */
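For reference, the bswap and xchgb/rorl sequences above both compute a plain 32-bit byte reversal; a portable C equivalent of what either instruction sequence produces (a sketch, not the kernel's code):

static inline __u32 swab32_sketch(__u32 x)
{
	return (x << 24) |			/* byte 0 -> byte 3 */
	       ((x & 0x0000ff00U) << 8) |	/* byte 1 -> byte 2 */
	       ((x >> 8) & 0x0000ff00U) |	/* byte 2 -> byte 1 */
	       (x >> 24);			/* byte 3 -> byte 0 */
}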
@@ -80,7 +80,6 @@
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */
@@ -92,6 +91,8 @@
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
#define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */

/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
@@ -117,6 +118,7 @@
#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */

/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */
@@ -237,6 +239,7 @@ extern const char * const x86_power_flags[32];
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)

#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
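The new X86_FEATURE_HYPERVISOR flag is word 4, bit 31, i.e. bit 31 of CPUID leaf 1's ecx, which hypervisors set to announce themselves. A hypothetical caller would test it through the cpu_has_hypervisor wrapper added above:

/* sketch: reporting the new flag (the function name is illustrative only) */
static void sketch_report_hypervisor(void)
{
	if (cpu_has_hypervisor)	/* boot_cpu_has(X86_FEATURE_HYPERVISOR) */
		printk(KERN_INFO "running under a hypervisor\n");
}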
@@ -65,18 +65,16 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif /* _ASM_X86_DMA_MAPPING_H */
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_64
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

#endif
	return (dma_addr == bad_dma_address);
}

@@ -6,13 +6,13 @@
 * precise-event based sampling (PEBS).
 *
 * It manages:
 * - per-thread and per-cpu allocation of BTS and PEBS
 * - DS and BTS hardware configuration
 * - buffer overflow handling (to be done)
 * - buffer access
 *
 * It assumes:
 * - get_task_struct on all traced tasks
 * - current is allowed to trace tasks
 * It does not do:
 * - security checking (is the caller allowed to trace the task)
 * - buffer allocation (memory accounting)
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation.
@@ -31,6 +31,7 @@
#ifdef CONFIG_X86_DS

struct task_struct;
struct ds_context;
struct ds_tracer;
struct bts_tracer;
struct pebs_tracer;
@@ -38,6 +39,38 @@ struct pebs_tracer;
typedef void (*bts_ovfl_callback_t)(struct bts_tracer *);
typedef void (*pebs_ovfl_callback_t)(struct pebs_tracer *);


/*
 * A list of features plus corresponding macros to talk about them in
 * the ds_request function's flags parameter.
 *
 * We use the enum to index an array of corresponding control bits;
 * we use the macro to index a flags bit-vector.
 */
enum ds_feature {
	dsf_bts = 0,
	dsf_bts_kernel,
#define BTS_KERNEL (1 << dsf_bts_kernel)
	/* trace kernel-mode branches */

	dsf_bts_user,
#define BTS_USER (1 << dsf_bts_user)
	/* trace user-mode branches */

	dsf_bts_overflow,
	dsf_bts_max,
	dsf_pebs = dsf_bts_max,

	dsf_pebs_max,
	dsf_ctl_max = dsf_pebs_max,
	dsf_bts_timestamps = dsf_ctl_max,
#define BTS_TIMESTAMPS (1 << dsf_bts_timestamps)
	/* add timestamps into BTS trace */

#define BTS_USER_FLAGS (BTS_KERNEL | BTS_USER | BTS_TIMESTAMPS)
};


/*
 * Request BTS or PEBS
 *
@@ -58,92 +91,135 @@ typedef void (*pebs_ovfl_callback_t)(struct pebs_tracer *);
 * NULL if cyclic buffer requested
 * th: the interrupt threshold in records from the end of the buffer;
 * -1 if no interrupt threshold is requested.
 * flags: a bit-mask of the above flags
 */
extern struct bts_tracer *ds_request_bts(struct task_struct *task,
					 void *base, size_t size,
					 bts_ovfl_callback_t ovfl, size_t th);
					 bts_ovfl_callback_t ovfl,
					 size_t th, unsigned int flags);
extern struct pebs_tracer *ds_request_pebs(struct task_struct *task,
					   void *base, size_t size,
					   pebs_ovfl_callback_t ovfl,
					   size_t th);
					   size_t th, unsigned int flags);

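A hypothetical call against the new signature, combining the flag macros from the enum above (buffer and size are caller-provided; per the comment block, -1 disables the interrupt threshold and a NULL callback requests a cyclic buffer):

/* sketch only: request a BTS tracer using the new flags argument */
static struct bts_tracer *sketch_trace_task(struct task_struct *task,
					    void *buf, size_t size)
{
	return ds_request_bts(task, buf, size, NULL /* cyclic buffer */,
			      (size_t)-1 /* no interrupt threshold */,
			      BTS_USER | BTS_KERNEL | BTS_TIMESTAMPS);
}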
/*
 * Release BTS or PEBS resources
 *
 * Returns 0 on success; -Eerrno otherwise
 * Suspend and resume BTS or PEBS tracing
 *
 * tracer: the tracer handle returned from ds_request_~()
 */
extern int ds_release_bts(struct bts_tracer *tracer);
extern int ds_release_pebs(struct pebs_tracer *tracer);
extern void ds_release_bts(struct bts_tracer *tracer);
extern void ds_suspend_bts(struct bts_tracer *tracer);
extern void ds_resume_bts(struct bts_tracer *tracer);
extern void ds_release_pebs(struct pebs_tracer *tracer);
extern void ds_suspend_pebs(struct pebs_tracer *tracer);
extern void ds_resume_pebs(struct pebs_tracer *tracer);


/*
 * Get the (array) index of the write pointer.
 * (assuming an array of BTS/PEBS records)
 * The raw DS buffer state as it is used for BTS and PEBS recording.
 *
 * Returns 0 on success; -Eerrno on error
 *
 * tracer: the tracer handle returned from ds_request_~()
 * pos (out): will hold the result
 * This is the low-level, arch-dependent interface for working
 * directly on the raw trace data.
 */
extern int ds_get_bts_index(struct bts_tracer *tracer, size_t *pos);
extern int ds_get_pebs_index(struct pebs_tracer *tracer, size_t *pos);
struct ds_trace {
	/* the number of bts/pebs records */
	size_t n;
	/* the size of a bts/pebs record in bytes */
	size_t size;
	/* pointers into the raw buffer:
	   - to the first entry */
	void *begin;
	/* - one beyond the last entry */
	void *end;
	/* - one beyond the newest entry */
	void *top;
	/* - the interrupt threshold */
	void *ith;
	/* flags given on ds_request() */
	unsigned int flags;
};

/*
 * Get the (array) index one record beyond the end of the array.
 * (assuming an array of BTS/PEBS records)
 *
 * Returns 0 on success; -Eerrno on error
 *
 * tracer: the tracer handle returned from ds_request_~()
 * pos (out): will hold the result
 * An arch-independent view on branch trace data.
 */
extern int ds_get_bts_end(struct bts_tracer *tracer, size_t *pos);
extern int ds_get_pebs_end(struct pebs_tracer *tracer, size_t *pos);
enum bts_qualifier {
	bts_invalid,
#define BTS_INVALID bts_invalid

	bts_branch,
#define BTS_BRANCH bts_branch

	bts_task_arrives,
#define BTS_TASK_ARRIVES bts_task_arrives

	bts_task_departs,
#define BTS_TASK_DEPARTS bts_task_departs

	bts_qual_bit_size = 4,
	bts_qual_max = (1 << bts_qual_bit_size),
};

struct bts_struct {
	__u64 qualifier;
	union {
		/* BTS_BRANCH */
		struct {
			__u64 from;
			__u64 to;
		} lbr;
		/* BTS_TASK_ARRIVES or BTS_TASK_DEPARTS */
		struct {
			__u64 jiffies;
			pid_t pid;
		} timestamp;
	} variant;
};


/*
 * Provide a pointer to the BTS/PEBS record at parameter index.
 * (assuming an array of BTS/PEBS records)
 * The BTS state.
 *
 * The pointer points directly into the buffer. The user is
 * responsible for copying the record.
 *
 * Returns the size of a single record on success; -Eerrno on error
 *
 * tracer: the tracer handle returned from ds_request_~()
 * index: the index of the requested record
 * record (out): pointer to the requested record
 * This gives access to the raw DS state and adds functions to provide
 * an arch-independent view of the BTS data.
 */
extern int ds_access_bts(struct bts_tracer *tracer,
			 size_t index, const void **record);
extern int ds_access_pebs(struct pebs_tracer *tracer,
			  size_t index, const void **record);
struct bts_trace {
	struct ds_trace ds;

	int (*read)(struct bts_tracer *tracer, const void *at,
		    struct bts_struct *out);
	int (*write)(struct bts_tracer *tracer, const struct bts_struct *in);
};


/*
 * Write one or more BTS/PEBS records at the write pointer index and
 * advance the write pointer.
 * The PEBS state.
 *
 * If size is not a multiple of the record size, trailing bytes are
 * zeroed out.
 * This gives access to the raw DS state and the PEBS-specific counter
 * reset value.
 */
struct pebs_trace {
	struct ds_trace ds;

	/* the PEBS reset value */
	unsigned long long reset_value;
};


/*
 * Read the BTS or PEBS trace.
 *
 * May result in one or more overflow notifications.
 * Returns a view on the trace collected for the parameter tracer.
 *
 * If called during overflow handling, that is, with index >=
 * interrupt threshold, the write will wrap around.
 *
 * An overflow notification is given if and when the interrupt
 * threshold is reached during or after the write.
 *
 * Returns the number of bytes written or -Eerrno.
 * The view remains valid as long as the traced task is not running or
 * the tracer is suspended.
 * Writes into the trace buffer are not reflected.
 *
 * tracer: the tracer handle returned from ds_request_~()
 * buffer: the buffer to write
 * size: the size of the buffer
 */
extern int ds_write_bts(struct bts_tracer *tracer,
			const void *buffer, size_t size);
extern int ds_write_pebs(struct pebs_tracer *tracer,
			 const void *buffer, size_t size);
extern const struct bts_trace *ds_read_bts(struct bts_tracer *tracer);
extern const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer);

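Putting the pieces together, a consumer obtains the view with ds_read_bts() and decodes raw records through the read() hook; a sketch that walks the records collected so far (it only covers begin..top and ignores the wrapped top..end portion of a full cyclic buffer):

/* sketch only: decode the branch records gathered so far */
static void sketch_dump_branches(struct bts_tracer *tracer)
{
	const struct bts_trace *trace = ds_read_bts(tracer);
	const void *at;
	struct bts_struct rec;

	/* ds.top is one beyond the newest record; ds.size is bytes/record */
	for (at = trace->ds.begin; at < trace->ds.top; at += trace->ds.size) {
		if (trace->read(tracer, at, &rec) < 0)
			continue;
		if (rec.qualifier == BTS_BRANCH)
			printk(KERN_DEBUG "branch %llx -> %llx\n",
			       rec.variant.lbr.from, rec.variant.lbr.to);
	}
}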
/*
 * Reset the write pointer of the BTS/PEBS buffer.
@@ -155,27 +231,6 @@ extern int ds_write_pebs(struct pebs_tracer *tracer,
extern int ds_reset_bts(struct bts_tracer *tracer);
extern int ds_reset_pebs(struct pebs_tracer *tracer);

/*
 * Clear the BTS/PEBS buffer and reset the write pointer.
 * The entire buffer will be zeroed out.
 *
 * Returns 0 on success; -Eerrno on error
 *
 * tracer: the tracer handle returned from ds_request_~()
 */
extern int ds_clear_bts(struct bts_tracer *tracer);
extern int ds_clear_pebs(struct pebs_tracer *tracer);

/*
 * Provide the PEBS counter reset value.
 *
 * Returns 0 on success; -Eerrno on error
 *
 * tracer: the tracer handle returned from ds_request_pebs()
 * value (out): the counter reset value
 */
extern int ds_get_pebs_reset(struct pebs_tracer *tracer, u64 *value);

/*
 * Set the PEBS counter reset value.
 *
@@ -192,35 +247,26 @@ extern int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value);
struct cpuinfo_x86;
extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);


/*
 * Context switch work
 */
extern void ds_switch_to(struct task_struct *prev, struct task_struct *next);

/*
 * The DS context - part of struct thread_struct.
 * Task clone/init and cleanup work
 */
#define MAX_SIZEOF_DS (12 * 8)

struct ds_context {
	/* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */
	unsigned char ds[MAX_SIZEOF_DS];
	/* the owner of the BTS and PEBS configuration, respectively */
	struct ds_tracer *owner[2];
	/* use count */
	unsigned long count;
	/* a pointer to the context location inside the thread_struct
	 * or the per_cpu context array */
	struct ds_context **this;
	/* a pointer to the task owning this context, or NULL, if the
	 * context is owned by a cpu */
	struct task_struct *task;
};

/* called by exit_thread() to free leftover contexts */
extern void ds_free(struct ds_context *context);
extern void ds_copy_thread(struct task_struct *tsk, struct task_struct *father);
extern void ds_exit_thread(struct task_struct *tsk);

#else /* CONFIG_X86_DS */

struct cpuinfo_x86;
static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {}
static inline void ds_switch_to(struct task_struct *prev,
				struct task_struct *next) {}
static inline void ds_copy_thread(struct task_struct *tsk,
				  struct task_struct *father) {}
static inline void ds_exit_thread(struct task_struct *tsk) {}

#endif /* CONFIG_X86_DS */
#endif /* _ASM_X86_DS_H */
@@ -6,56 +6,91 @@
#endif

/*
   Macros for dwarf2 CFI unwind table entries.
   See "as.info" for details on these pseudo ops. Unfortunately
   they are only supported in very new binutils, so define them
   away for older version.
 * Macros for dwarf2 CFI unwind table entries.
 * See "as.info" for details on these pseudo ops. Unfortunately
 * they are only supported in very new binutils, so define them
 * away for older version.
 */

#ifdef CONFIG_AS_CFI

#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
#define CFI_OFFSET .cfi_offset
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_REGISTER .cfi_register
#define CFI_RESTORE .cfi_restore
#define CFI_REMEMBER_STATE .cfi_remember_state
#define CFI_RESTORE_STATE .cfi_restore_state
#define CFI_UNDEFINED .cfi_undefined
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
#define CFI_OFFSET .cfi_offset
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_REGISTER .cfi_register
#define CFI_RESTORE .cfi_restore
#define CFI_REMEMBER_STATE .cfi_remember_state
#define CFI_RESTORE_STATE .cfi_restore_state
#define CFI_UNDEFINED .cfi_undefined

#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
#define CFI_SIGNAL_FRAME .cfi_signal_frame
#define CFI_SIGNAL_FRAME .cfi_signal_frame
#else
#define CFI_SIGNAL_FRAME
#endif

#else

/* Due to the structure of pre-exisiting code, don't use assembler line
   comment character # to ignore the arguments. Instead, use a dummy macro. */
/*
 * Due to the structure of pre-exisiting code, don't use assembler line
 * comment character # to ignore the arguments. Instead, use a dummy macro.
 */
.macro cfi_ignore a=0, b=0, c=0, d=0
.endm

#define CFI_STARTPROC cfi_ignore
#define CFI_ENDPROC cfi_ignore
#define CFI_DEF_CFA cfi_ignore
#define CFI_STARTPROC cfi_ignore
#define CFI_ENDPROC cfi_ignore
#define CFI_DEF_CFA cfi_ignore
#define CFI_DEF_CFA_REGISTER cfi_ignore
#define CFI_DEF_CFA_OFFSET cfi_ignore
#define CFI_ADJUST_CFA_OFFSET cfi_ignore
#define CFI_OFFSET cfi_ignore
#define CFI_REL_OFFSET cfi_ignore
#define CFI_REGISTER cfi_ignore
#define CFI_RESTORE cfi_ignore
#define CFI_REMEMBER_STATE cfi_ignore
#define CFI_RESTORE_STATE cfi_ignore
#define CFI_UNDEFINED cfi_ignore
#define CFI_SIGNAL_FRAME cfi_ignore
#define CFI_OFFSET cfi_ignore
#define CFI_REL_OFFSET cfi_ignore
#define CFI_REGISTER cfi_ignore
#define CFI_RESTORE cfi_ignore
#define CFI_REMEMBER_STATE cfi_ignore
#define CFI_RESTORE_STATE cfi_ignore
#define CFI_UNDEFINED cfi_ignore
#define CFI_SIGNAL_FRAME cfi_ignore

#endif

/*
 * An attempt to make CFI annotations more or less
 * correct and shorter. It is implied that you know
 * what you're doing if you use them.
 */
#ifdef __ASSEMBLY__
#ifdef CONFIG_X86_64
	.macro pushq_cfi reg
	pushq \reg
	CFI_ADJUST_CFA_OFFSET 8
	.endm

	.macro popq_cfi reg
	popq \reg
	CFI_ADJUST_CFA_OFFSET -8
	.endm

	.macro movq_cfi reg offset=0
	movq %\reg, \offset(%rsp)
	CFI_REL_OFFSET \reg, \offset
	.endm

	.macro movq_cfi_restore offset reg
	movq \offset(%rsp), %\reg
	CFI_RESTORE \reg
	.endm
#else /*!CONFIG_X86_64*/

	/* 32bit defenitions are missed yet */

#endif /*!CONFIG_X86_64*/
#endif /*__ASSEMBLY__*/

#endif /* _ASM_X86_DWARF2_H */
@@ -325,7 +325,7 @@ struct linux_binprm;

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
				       int executable_stack);
				       int uses_interp);

extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
#define compat_arch_setup_additional_pages syscall32_setup_pages
@@ -1,6 +1,33 @@
#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H

#ifdef __ASSEMBLY__

	.macro MCOUNT_SAVE_FRAME
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)
	.endm

	.macro MCOUNT_RESTORE_FRAME
	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp
	.endm

#endif

#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((long)(mcount))
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
@@ -46,7 +73,7 @@ struct ftrace_ret_stack {
/*
 * Primary handler of a function return.
 * It relays on ftrace_return_to_handler.
 * Defined in entry32.S
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

@@ -29,6 +29,39 @@ extern int fix_aperture;
#define AMD64_GARTCACHECTL 0x9c
#define AMD64_GARTEN (1<<0)

#ifdef CONFIG_GART_IOMMU
extern int gart_iommu_aperture;
extern int gart_iommu_aperture_allowed;
extern int gart_iommu_aperture_disabled;

extern void early_gart_iommu_check(void);
extern void gart_iommu_init(void);
extern void gart_iommu_shutdown(void);
extern void __init gart_parse_options(char *);
extern void gart_iommu_hole_init(void);

#else
#define gart_iommu_aperture 0
#define gart_iommu_aperture_allowed 0
#define gart_iommu_aperture_disabled 1

static inline void early_gart_iommu_check(void)
{
}
static inline void gart_iommu_init(void)
{
}
static inline void gart_iommu_shutdown(void)
{
}
static inline void gart_parse_options(char *options)
{
}
static inline void gart_iommu_hole_init(void)
{
}
#endif

extern int agp_amd64_init(void);

static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
@@ -22,6 +22,8 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)

#define inc_irq_stat(member) (__get_cpu_var(irq_stat).member++)

void ack_bad_irq(unsigned int irq);
#include <linux/irq_cpustat.h>

@@ -11,6 +11,8 @@

#define __ARCH_IRQ_STAT 1

#define inc_irq_stat(member) add_pda(member, 1)

#define local_softirq_pending() read_pda(__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING 1
@@ -109,9 +109,7 @@ extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
#endif
#endif

#ifdef CONFIG_X86_32
extern void (*const interrupt[NR_VECTORS])(void);
#endif
extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);

typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
arch/x86/include/asm/hypervisor.h (new file, 26 lines)
@@ -0,0 +1,26 @@
/*
 * Copyright (C) 2008, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#ifndef ASM_X86__HYPERVISOR_H
#define ASM_X86__HYPERVISOR_H

extern unsigned long get_hypervisor_tsc_freq(void);
extern void init_hypervisor(struct cpuinfo_x86 *c);

#endif
@@ -129,24 +129,6 @@ typedef struct compat_siginfo {
	} _sifields;
} compat_siginfo_t;

struct sigframe32 {
	u32 pretcode;
	int sig;
	struct sigcontext_ia32 sc;
	struct _fpstate_ia32 fpstate;
	unsigned int extramask[_COMPAT_NSIG_WORDS-1];
};

struct rt_sigframe32 {
	u32 pretcode;
	int sig;
	u32 pinfo;
	u32 puc;
	compat_siginfo_t info;
	struct ucontext_ia32 uc;
	struct _fpstate_ia32 fpstate;
};

struct ustat32 {
	__u32 f_tfree;
	compat_ino_t f_tinode;
@@ -8,8 +8,13 @@ struct notifier_block;
void idle_notifier_register(struct notifier_block *n);
void idle_notifier_unregister(struct notifier_block *n);

#ifdef CONFIG_X86_64
void enter_idle(void);
void exit_idle(void);
#else /* !CONFIG_X86_64 */
static inline void enter_idle(void) { }
static inline void exit_idle(void) { }
#endif /* CONFIG_X86_64 */

void c1e_remove_cpu(int cpu);

@@ -4,6 +4,7 @@
#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <asm-generic/int-ll64.h>

#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
@@ -45,21 +46,39 @@ build_mmio_write(__writel, "l", unsigned int, "r", )
#define mmiowb() barrier()

#ifdef CONFIG_X86_64

build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
build_mmio_read(__readq, "q", unsigned long, "=r", )
build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
build_mmio_write(__writeq, "q", unsigned long, "r", )

#define readq_relaxed(a) __readq(a)
#define __raw_readq __readq
#define __raw_writeq writeq
#else

static inline __u64 readq(const volatile void __iomem *addr)
{
	const volatile u32 __iomem *p = addr;
	u32 low, high;

	low = readl(p);
	high = readl(p + 1);

	return low + ((u64)high << 32);
}

static inline void writeq(__u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr+4);
}

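One property of the 32-bit fallback above worth spelling out: the two readl() calls are separate bus cycles, so the emulated readq is not atomic with respect to the device, and low + ((u64)high << 32) equals bitwise OR of the halves only because they occupy disjoint bit ranges. As a worked example, a register holding 0x1122334455667788 yields low == 0x55667788 and high == 0x11223344, and 0x55667788 + (0x11223344ULL << 32) reassembles the original value.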
/* Let people know we have them */
#define readq readq
#define writeq writeq
#endif

extern int iommu_bio_merge;
#define readq_relaxed(a) readq(a)

#define __raw_readq(a) readq(a)
#define __raw_writeq(val, addr) writeq(val, addr)

/* Let people know that we have them */
#define readq readq
#define writeq writeq

#ifdef CONFIG_X86_32
# include "io_32.h"
@@ -232,8 +232,6 @@ void memset_io(volatile void __iomem *a, int b, size_t c);

#define flush_write_buffers()

#define BIO_VMERGE_BOUNDARY iommu_bio_merge

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
@@ -156,11 +156,21 @@ extern int sis_apic_bug;
/* 1 if "noapic" boot option passed */
extern int skip_ioapic_setup;

/* 1 if "noapic" boot option passed */
extern int noioapicquirk;

/* -1 if "noapic" boot option passed */
extern int noioapicreroute;

/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
extern int timer_through_8259;

static inline void disable_ioapic_setup(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

@@ -7,42 +7,7 @@ extern struct dma_mapping_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;

extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);

/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)

#ifdef CONFIG_GART_IOMMU
extern int gart_iommu_aperture;
extern int gart_iommu_aperture_allowed;
extern int gart_iommu_aperture_disabled;

extern void early_gart_iommu_check(void);
extern void gart_iommu_init(void);
extern void gart_iommu_shutdown(void);
extern void __init gart_parse_options(char *);
extern void gart_iommu_hole_init(void);

#else
#define gart_iommu_aperture 0
#define gart_iommu_aperture_allowed 0
#define gart_iommu_aperture_disabled 1

static inline void early_gart_iommu_check(void)
{
}
static inline void gart_iommu_init(void)
{
}
static inline void gart_iommu_shutdown(void)
{
}
static inline void gart_parse_options(char *options)
{
}
static inline void gart_iommu_hole_init(void)
{
}
#endif

#endif /* _ASM_X86_IOMMU_H */
@@ -31,10 +31,6 @@ static inline int irq_canonicalize(int irq)
# endif
#endif

#ifdef CONFIG_IRQBALANCE
extern int irqbalance_disable(char *str);
#endif

#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpumask.h>
extern void fixup_irqs(void);
@@ -9,6 +9,8 @@

#include <asm/percpu.h>

#define ARCH_HAS_OWN_IRQ_REGS

DECLARE_PER_CPU(struct pt_regs *, irq_regs);

static inline struct pt_regs *get_irq_regs(void)
@@ -57,5 +57,65 @@
#define __ALIGN_STR ".align 16,0x90"
#endif

/*
 * to check ENTRY_X86/END_X86 and
 * KPROBE_ENTRY_X86/KPROBE_END_X86
 * unbalanced-missed-mixed appearance
 */
#define __set_entry_x86 .set ENTRY_X86_IN, 0
#define __unset_entry_x86 .set ENTRY_X86_IN, 1
#define __set_kprobe_x86 .set KPROBE_X86_IN, 0
#define __unset_kprobe_x86 .set KPROBE_X86_IN, 1

#define __macro_err_x86 .error "ENTRY_X86/KPROBE_X86 unbalanced,missed,mixed"

#define __check_entry_x86 \
	.ifdef ENTRY_X86_IN; \
	.ifeq ENTRY_X86_IN; \
	__macro_err_x86; \
	.abort; \
	.endif; \
	.endif

#define __check_kprobe_x86 \
	.ifdef KPROBE_X86_IN; \
	.ifeq KPROBE_X86_IN; \
	__macro_err_x86; \
	.abort; \
	.endif; \
	.endif

#define __check_entry_kprobe_x86 \
	__check_entry_x86; \
	__check_kprobe_x86

#define ENTRY_KPROBE_FINAL_X86 __check_entry_kprobe_x86

#define ENTRY_X86(name) \
	__check_entry_kprobe_x86; \
	__set_entry_x86; \
	.globl name; \
	__ALIGN; \
	name:

#define END_X86(name) \
	__unset_entry_x86; \
	__check_entry_kprobe_x86; \
	.size name, .-name

#define KPROBE_ENTRY_X86(name) \
	__check_entry_kprobe_x86; \
	__set_kprobe_x86; \
	.pushsection .kprobes.text, "ax"; \
	.globl name; \
	__ALIGN; \
	name:

#define KPROBE_END_X86(name) \
	__unset_kprobe_x86; \
	__check_entry_kprobe_x86; \
	.size name, .-name; \
	.popsection

#endif /* _ASM_X86_LINKAGE_H */

@@ -4,9 +4,8 @@

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	unsigned cpu = smp_processor_id();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
	if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}

@@ -20,8 +19,8 @@ static inline void switch_mm(struct mm_struct *prev,
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		per_cpu(cpu_tlbstate, cpu).active_mm = next;
		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
		x86_write_percpu(cpu_tlbstate.active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);

@@ -36,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev,
	}
#ifdef CONFIG_SMP
	else {
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);

		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
@@ -85,7 +85,9 @@
/* AMD64 MSRs. Not complete. See the architecture manual for a more
   complete list. */

#define MSR_AMD64_PATCH_LEVEL 0x0000008b
#define MSR_AMD64_NB_CFG 0xc001001f
#define MSR_AMD64_PATCH_LOADER 0xc0010020
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
@@ -22,10 +22,10 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
}

/*
 * i386 calling convention returns 64-bit value in edx:eax, while
 * x86_64 returns at rax. Also, the "A" constraint does not really
 * mean rdx:rax in x86_64, so we need specialized behaviour for each
 * architecture
 * both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
#define DECLARE_ARGS(val, low, high) unsigned low, high
@@ -85,7 +85,8 @@ static inline void native_write_msr(unsigned int msr,
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}

static inline int native_write_msr_safe(unsigned int msr,
/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
					unsigned low, unsigned high)
{
	int err;
@@ -181,10 +182,10 @@ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
}

#define rdtscl(low) \
	((low) = (u32)native_read_tsc())
	((low) = (u32)__native_read_tsc())

#define rdtscll(val) \
	((val) = native_read_tsc())
	((val) = __native_read_tsc())

#define rdpmc(counter, low, high) \
do { \
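A minimal sketch of the edx:eax convention the rewritten comment describes, valid on both i386 and x86_64 (on 64-bit, rdtsc still delivers the two 32-bit halves in eax and edx, which is why an explicit "=a"/"=d" split is preferred over the "A" constraint there):

/* sketch only: how the low/high halves are recombined */
static inline unsigned long long rdtsc_sketch(void)
{
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));
	return low | ((unsigned long long)high << 32);
}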
@@ -19,6 +19,8 @@ struct pci_sysdata {
};

extern int pci_routeirq;
extern int noioapicquirk;
extern int noioapicreroute;

/* scan a bus after allocating a pci_sysdata for it */
extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
@@ -82,6 +84,8 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
static inline void early_quirks(void) { }
#endif

extern void pci_iommu_alloc(void);

#endif /* __KERNEL__ */

#ifdef CONFIG_X86_32
@@ -98,9 +102,9 @@ static inline void early_quirks(void) { }

#ifdef CONFIG_NUMA
/* Returns the node based on pci bus */
static inline int __pcibus_to_node(struct pci_bus *bus)
static inline int __pcibus_to_node(const struct pci_bus *bus)
{
	struct pci_sysdata *sd = bus->sysdata;
	const struct pci_sysdata *sd = bus->sysdata;

	return sd->node;
}
@@ -109,6 +113,12 @@ static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
{
	return node_to_cpumask(__pcibus_to_node(bus));
}

static inline const struct cpumask *
cpumask_of_pcibus(const struct pci_bus *bus)
{
	return cpumask_of_node(__pcibus_to_node(bus));
}
#endif

#endif /* _ASM_X86_PCI_H */
@@ -23,7 +23,6 @@ extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
			       int reg, int len, u32 value);

extern void dma32_reserve_bootmem(void);
extern void pci_iommu_alloc(void);

/* The PCI address space does equal the physical memory
 * address space. The networking and block device layers use
@@ -56,23 +56,55 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#define pte_none(x) (!(x).pte_low)

/*
 * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
 * into this range:
 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
 * split up the 29 bits of offset into this range:
 */
#define PTE_FILE_MAX_BITS 29
#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
#else
#define PTE_FILE_SHIFT2 (_PAGE_BIT_PROTNONE + 1)
#define PTE_FILE_SHIFT3 (_PAGE_BIT_FILE + 1)
#endif
#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)

#define pte_to_pgoff(pte) \
	((((pte).pte_low >> 1) & 0x1f) + (((pte).pte_low >> 8) << 5))
	((((pte).pte_low >> PTE_FILE_SHIFT1) \
	  & ((1U << PTE_FILE_BITS1) - 1)) \
	 + ((((pte).pte_low >> PTE_FILE_SHIFT2) \
	     & ((1U << PTE_FILE_BITS2) - 1)) << PTE_FILE_BITS1) \
	 + (((pte).pte_low >> PTE_FILE_SHIFT3) \
	    << (PTE_FILE_BITS1 + PTE_FILE_BITS2)))

#define pgoff_to_pte(off) \
	((pte_t) { .pte_low = (((off) & 0x1f) << 1) + \
			(((off) >> 5) << 8) + _PAGE_FILE })
	((pte_t) { .pte_low = \
		(((off) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \
		+ ((((off) >> PTE_FILE_BITS1) & ((1U << PTE_FILE_BITS2) - 1)) \
		   << PTE_FILE_SHIFT2) \
		+ (((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
		   << PTE_FILE_SHIFT3) \
		+ _PAGE_FILE })

/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x1f)
#define __swp_offset(x) ((x).val >> 8)
#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
#else
#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1)
#endif

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
		       & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { \
				   ((type) << (_PAGE_BIT_PRESENT + 1)) \
				   | ((offset) << SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })

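A worked example of the new encoding, using the bit assignments from the pgtable hunk below: _PAGE_BIT_PRESENT = 0, _PAGE_BIT_FILE = _PAGE_BIT_DIRTY = 6 and _PAGE_BIT_PROTNONE = _PAGE_BIT_GLOBAL = 8, so the first #if branch is taken:

/*
 * File ptes: PTE_FILE_SHIFT1/2/3 become 1, 7 and 9, so PTE_FILE_BITS1 = 5
 * and PTE_FILE_BITS2 = 1: offset bits 0-4 land in pte bits 1-5, offset
 * bit 5 in pte bit 7, and the remaining 23 offset bits from pte bit 9 up,
 * giving 5 + 1 + 23 = 29 = PTE_FILE_MAX_BITS while stepping around the
 * reserved present (0), file (6) and protnone (8) bits.
 *
 * Swap ptes: SWP_TYPE_BITS = 6 - 0 - 1 = 5 and SWP_OFFSET_SHIFT = 9, so
 * __swp_entry(3, 0x1234).val == (3 << 1) | (0x1234 << 9) == 0x246806,
 * and decoding gives __swp_type == (0x246806 >> 1) & 0x1f == 3 and
 * __swp_offset == 0x246806 >> 9 == 0x1234.
 */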
@@ -166,6 +166,7 @@ static inline int pte_none(pte_t pte)
#define PTE_FILE_MAX_BITS 32

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x) (((x).val) & 0x1f)
#define __swp_offset(x) ((x).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
@@ -10,7 +10,6 @@
#define _PAGE_BIT_PCD 4 /* page cache disabled */
#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
#define _PAGE_BIT_FILE 6
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT 7 /* on 4KB pages */
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
@@ -22,6 +21,12 @@
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY

#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
@@ -46,11 +51,8 @@
#define _PAGE_NX (_AT(pteval_t, 0))
#endif

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping,
				 * saved PTE; unset:swap */
#define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE;
				     pte_present gives true */
#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
		     _PAGE_ACCESSED | _PAGE_DIRTY)
@@ -158,8 +160,19 @@
#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
#endif

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot) \
	((boot_cpu_data.x86 > 3) \
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS)) \
	 : (prot))

#ifndef __ASSEMBLY__

#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
@@ -329,6 +342,9 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

#ifndef __ASSEMBLY__
/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -100,15 +100,6 @@ extern unsigned long pg0[];
# include <asm/pgtable-2level.h>
#endif

/*
 * Macro to mark a page protection value as "uncacheable".
 * On processors which do not support it, this is a no-op.
 */
#define pgprot_noncached(prot) \
	((boot_cpu_data.x86 > 3) \
	 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) \
	 : (prot))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
@@ -146,7 +146,7 @@ static inline void native_pgd_clear(pgd_t *pgd)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))


#define MAXMEM _AC(0x00003fffffffffff, UL)
#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
#define VMALLOC_START _AC(0xffffc20000000000, UL)
#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
#define VMEMMAP_START _AC(0xffffe20000000000, UL)
@@ -176,12 +176,6 @@ static inline int pmd_bad(pmd_t pmd)

#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot) \
	(__pgprot(pgprot_val((prot)) | _PAGE_PCD | _PAGE_PWT))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
@@ -250,10 +244,22 @@ static inline int pud_large(pud_t pte)
extern int direct_gbpages;

/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x3f)
#define __swp_offset(x) ((x).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | \
				   ((offset) << 8) })
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
#else
#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1)
#endif

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
		       & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { \
				   ((type) << (_PAGE_BIT_PRESENT + 1)) \
				   | ((offset) << SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })

@@ -6,5 +6,8 @@
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004

#ifdef CONFIG_X86_64
extern long sys_arch_prctl(int, unsigned long);
#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_PRCTL_H */
@@ -110,6 +110,7 @@ struct cpuinfo_x86 {
	/* Index into per_cpu list: */
	u16 cpu_index;
#endif
	unsigned int x86_hyper_vendor;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL 0
@@ -123,6 +124,9 @@ struct cpuinfo_x86 {

#define X86_VENDOR_UNKNOWN 0xff

#define X86_HYPER_VENDOR_NONE 0
#define X86_HYPER_VENDOR_VMWARE 1

/*
 * capabilities of CPUs
 */
@@ -752,6 +756,19 @@ extern void switch_to_new_gdt(void);
extern void cpu_init(void);
extern void init_gdt(int cpu);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
@@ -6,7 +6,6 @@
#include <asm/processor-flags.h>

#ifdef __KERNEL__
#include <asm/ds.h>	/* the DS BTS struct is used for ptrace too */
#include <asm/segment.h>
#endif

@@ -128,34 +127,6 @@ struct pt_regs {
#endif /* !__i386__ */


#ifdef CONFIG_X86_PTRACE_BTS
/* a branch trace record entry
 *
 * In order to unify the interface between various processor versions,
 * we use the below data structure for all processors.
 */
enum bts_qualifier {
	BTS_INVALID = 0,
	BTS_BRANCH,
	BTS_TASK_ARRIVES,
	BTS_TASK_DEPARTS
};

struct bts_struct {
	__u64 qualifier;
	union {
		/* BTS_BRANCH */
		struct {
			__u64 from_ip;
			__u64 to_ip;
		} lbr;
		/* BTS_TASK_ARRIVES or
		   BTS_TASK_DEPARTS */
		__u64 jiffies;
	} variant;
};
#endif /* CONFIG_X86_PTRACE_BTS */

#ifdef __KERNEL__

#include <linux/init.h>
@@ -163,13 +134,6 @@ struct bts_struct {
struct cpuinfo_x86;
struct task_struct;

#ifdef CONFIG_X86_PTRACE_BTS
extern void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *);
extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);
#else
#define ptrace_bts_init_intel(config) do {} while (0)
#endif /* CONFIG_X86_PTRACE_BTS */

extern unsigned long profile_pc(struct pt_regs *regs);

extern unsigned long
@@ -271,6 +235,13 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);

extern void x86_ptrace_untrace(struct task_struct *);
extern void x86_ptrace_fork(struct task_struct *child,
			    unsigned long clone_flags);

#define arch_ptrace_untrace(tsk) x86_ptrace_untrace(tsk)
#define arch_ptrace_fork(child, flags) x86_ptrace_fork(child, flags)

#endif /* __KERNEL__ */

#endif /* !__ASSEMBLY__ */
@@ -8,6 +8,10 @@

/* Interrupt control for vSMPowered x86_64 systems */
void vsmp_init(void);


void setup_bios_corruption_check(void);


#ifdef CONFIG_X86_VISWS
extern void visws_early_detect(void);
extern int is_visws_box(void);
arch/x86/include/asm/sigframe.h (new file, 70 lines)
@@ -0,0 +1,70 @@
#ifndef _ASM_X86_SIGFRAME_H
#define _ASM_X86_SIGFRAME_H

#include <asm/sigcontext.h>
#include <asm/siginfo.h>
#include <asm/ucontext.h>

#ifdef CONFIG_X86_32
#define sigframe_ia32 sigframe
#define rt_sigframe_ia32 rt_sigframe
#define sigcontext_ia32 sigcontext
#define _fpstate_ia32 _fpstate
#define ucontext_ia32 ucontext
#else /* !CONFIG_X86_32 */

#ifdef CONFIG_IA32_EMULATION
#include <asm/ia32.h>
#endif /* CONFIG_IA32_EMULATION */

#endif /* CONFIG_X86_32 */

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
struct sigframe_ia32 {
	u32 pretcode;
	int sig;
	struct sigcontext_ia32 sc;
	/*
	 * fpstate is unused. fpstate is moved/allocated after
	 * retcode[] below. This movement allows to have the FP state and the
	 * future state extensions (xsave) stay together.
	 * And at the same time retaining the unused fpstate, prevents changing
	 * the offset of extramask[] in the sigframe and thus prevent any
	 * legacy application accessing/modifying it.
	 */
	struct _fpstate_ia32 fpstate_unused;
#ifdef CONFIG_IA32_EMULATION
	unsigned int extramask[_COMPAT_NSIG_WORDS-1];
#else /* !CONFIG_IA32_EMULATION */
	unsigned long extramask[_NSIG_WORDS-1];
#endif /* CONFIG_IA32_EMULATION */
	char retcode[8];
	/* fp state follows here */
};

struct rt_sigframe_ia32 {
	u32 pretcode;
	int sig;
	u32 pinfo;
	u32 puc;
#ifdef CONFIG_IA32_EMULATION
	compat_siginfo_t info;
#else /* !CONFIG_IA32_EMULATION */
	struct siginfo info;
#endif /* CONFIG_IA32_EMULATION */
	struct ucontext_ia32 uc;
	char retcode[8];
	/* fp state follows here */
};
#endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */

#ifdef CONFIG_X86_64
struct rt_sigframe {
	char __user *pretcode;
	struct ucontext uc;
	struct siginfo info;
	/* fp state follows here */
};
#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_SIGFRAME_H */
@@ -121,6 +121,10 @@ typedef unsigned long sigset_t;

#ifndef __ASSEMBLY__

# ifdef __KERNEL__
extern void do_notify_resume(struct pt_regs *, void *, __u32);
# endif /* __KERNEL__ */

#ifdef __i386__
# ifdef __KERNEL__
struct old_sigaction {
@@ -141,8 +145,6 @@ struct k_sigaction {
	struct sigaction sa;
};

extern void do_notify_resume(struct pt_regs *, void *, __u32);

# else /* __KERNEL__ */
/* Here we must cater to libcs that poke about in kernel headers. */

@@ -27,7 +27,7 @@
#else /* CONFIG_X86_32 */
# define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */
# define MAX_PHYSADDR_BITS 44
# define MAX_PHYSMEM_BITS 44
# define MAX_PHYSMEM_BITS 44 /* Can be max 45 bits */
#endif

#endif /* CONFIG_SPARSEMEM */
@@ -19,6 +19,13 @@

/* kernel/ioport.c */
asmlinkage long sys_ioperm(unsigned long, unsigned long, int);

/* kernel/ldt.c */
asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);

/* kernel/tls.c */
asmlinkage int sys_set_thread_area(struct user_desc __user *);
asmlinkage int sys_get_thread_area(struct user_desc __user *);

/* X86_32 only */
#ifdef CONFIG_X86_32
/* kernel/process_32.c */
@@ -33,14 +40,11 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
			     struct old_sigaction __user *);
asmlinkage int sys_sigaltstack(unsigned long);
asmlinkage unsigned long sys_sigreturn(unsigned long);
asmlinkage int sys_rt_sigreturn(unsigned long);
asmlinkage int sys_rt_sigreturn(struct pt_regs);

/* kernel/ioport.c */
asmlinkage long sys_iopl(unsigned long);

/* kernel/ldt.c */
asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);

/* kernel/sys_i386_32.c */
asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
			  unsigned long, unsigned long, unsigned long);
@@ -54,10 +58,6 @@ asmlinkage int sys_uname(struct old_utsname __user *);
struct oldold_utsname;
asmlinkage int sys_olduname(struct oldold_utsname __user *);

/* kernel/tls.c */
asmlinkage int sys_set_thread_area(struct user_desc __user *);
asmlinkage int sys_get_thread_area(struct user_desc __user *);

/* kernel/vm86_32.c */
asmlinkage int sys_vm86old(struct pt_regs);
asmlinkage int sys_vm86(struct pt_regs);
@@ -17,12 +17,12 @@
# define AT_VECTOR_SIZE_ARCH 1
#endif

#ifdef CONFIG_X86_32

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next);

#ifdef CONFIG_X86_32

/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
@@ -26,7 +26,7 @@ struct exec_domain;
struct thread_info {
        struct task_struct *task;        /* main task structure */
        struct exec_domain *exec_domain; /* execution domain */
        unsigned long flags;             /* low level flags */
        __u32 flags;                     /* low level flags */
        __u32 status;                    /* thread synchronous flags */
        __u32 cpu;                       /* current CPU */
        int preempt_count;               /* 0 => preemptable,
@@ -93,7 +93,6 @@ struct thread_info {
#define TIF_FORCED_TF 24          /* true if TF in eflags artificially */
#define TIF_DEBUGCTLMSR 25        /* uses thread_struct.debugctlmsr */
#define TIF_DS_AREA_MSR 26        /* uses thread_struct.ds_area_msr */
#define TIF_BTS_TRACE_TS 27       /* record scheduling event timestamps */

#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -115,7 +114,6 @@ struct thread_info {
#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS)

/* work to do in syscall_trace_enter() */
#define _TIF_WORK_SYSCALL_ENTRY \
@@ -141,8 +139,7 @@ struct thread_info {

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
        (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS| \
         _TIF_NOTSC)
        (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC)

#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
@@ -61,13 +61,19 @@ static inline int cpu_to_node(int cpu)
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used. The
 * node_to_cpumask_ptr function should be used whenever possible.
 * cpumask_of_node function should be used whenever possible.
 */
static inline cpumask_t node_to_cpumask(int node)
{
        return node_to_cpumask_map[node];
}

/* Returns a bitmask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
        return &node_to_cpumask_map[node];
}

#else /* CONFIG_X86_64 */

/* Mappings between node number and cpus on that node. */
@@ -82,7 +88,7 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern int cpu_to_node(int cpu);
extern int early_cpu_to_node(int cpu);
extern const cpumask_t *_node_to_cpumask_ptr(int node);
extern const cpumask_t *cpumask_of_node(int node);
extern cpumask_t node_to_cpumask(int node);

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
@@ -103,7 +109,7 @@ static inline int early_cpu_to_node(int cpu)
}

/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const cpumask_t *_node_to_cpumask_ptr(int node)
static inline const cpumask_t *cpumask_of_node(int node)
{
        return &node_to_cpumask_map[node];
}
@@ -116,12 +122,15 @@ static inline cpumask_t node_to_cpumask(int node)

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

/* Replace default node_to_cpumask_ptr with optimized version */
/*
 * Replace default node_to_cpumask_ptr with optimized version.
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
 */
#define node_to_cpumask_ptr(v, node) \
        const cpumask_t *v = _node_to_cpumask_ptr(node)
        const cpumask_t *v = cpumask_of_node(node)

#define node_to_cpumask_ptr_next(v, node) \
        v = _node_to_cpumask_ptr(node)
        v = cpumask_of_node(node)

#endif /* CONFIG_X86_64 */
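For callers, the deprecation comment above translates into a mechanical rewrite. A hypothetical in-kernel caller (illustrative, not part of this patch) before and after the migration:

/* Old style: goes through the deprecated wrapper macro, which after
 * this patch expands to cpumask_of_node() anyway. */
static int first_cpu_on_node_old(int node)
{
        node_to_cpumask_ptr(mask, node);
        return first_cpu(*mask);
}

/* New style: call cpumask_of_node() directly. */
static int first_cpu_on_node_new(int node)
{
        return cpumask_first(cpumask_of_node(node));
}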

@@ -187,7 +196,7 @@ extern int __node_distance(int, int);
#define cpu_to_node(cpu) 0
#define early_cpu_to_node(cpu) 0

static inline const cpumask_t *_node_to_cpumask_ptr(int node)
static inline const cpumask_t *cpumask_of_node(int node)
{
        return &cpu_online_map;
}
@@ -200,12 +209,15 @@ static inline int node_to_first_cpu(int node)
        return first_cpu(cpu_online_map);
}

/* Replace default node_to_cpumask_ptr with optimized version */
/*
 * Replace default node_to_cpumask_ptr with optimized version.
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
 */
#define node_to_cpumask_ptr(v, node) \
        const cpumask_t *v = _node_to_cpumask_ptr(node)
        const cpumask_t *v = cpumask_of_node(node)

#define node_to_cpumask_ptr_next(v, node) \
        v = _node_to_cpumask_ptr(node)
        v = cpumask_of_node(node)
#endif

#include <asm-generic/topology.h>
@@ -214,12 +226,12 @@ static inline int node_to_first_cpu(int node)
/* Returns the number of the first CPU on Node 'node'. */
static inline int node_to_first_cpu(int node)
{
        node_to_cpumask_ptr(mask, node);
        return first_cpu(*mask);
        return cpumask_first(cpumask_of_node(node));
}
#endif

extern cpumask_t cpu_coregroup_map(int cpu);
extern const struct cpumask *cpu_coregroup_mask(int cpu);

#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
@@ -3,6 +3,7 @@

#ifndef __ASSEMBLY__

#ifdef CONFIG_X86_TRAMPOLINE
/*
 * Trampoline 80x86 program as an array.
 */
@@ -13,8 +14,14 @@ extern unsigned char *trampoline_base;
extern unsigned long init_rsp;
extern unsigned long initial_code;

#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
#define TRAMPOLINE_BASE 0x6000

extern unsigned long setup_trampoline(void);
extern void __init reserve_trampoline_memory(void);
#else
static inline void reserve_trampoline_memory(void) {};
#endif /* CONFIG_X86_TRAMPOLINE */

#endif /* __ASSEMBLY__ */
@@ -46,6 +46,10 @@ dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
dotraplinkage void do_segment_not_present(struct pt_regs *, long);
dotraplinkage void do_stack_segment(struct pt_regs *, long);
#ifdef CONFIG_X86_64
dotraplinkage void do_double_fault(struct pt_regs *, long);
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *);
#endif
dotraplinkage void do_general_protection(struct pt_regs *, long);
dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long);
@@ -72,10 +76,13 @@ static inline int get_si_code(unsigned long condition)
extern int panic_on_unrecovered_nmi;
extern int kstack_depth_to_print;

#ifdef CONFIG_X86_32
void math_error(void __user *);
unsigned long patch_espfix_desc(unsigned long, unsigned long);
asmlinkage void math_emulate(long);
#ifdef CONFIG_X86_32
unsigned long patch_espfix_desc(unsigned long, unsigned long);
#else
asmlinkage void smp_thermal_interrupt(void);
asmlinkage void mce_threshold_interrupt(void);
#endif

#endif /* _ASM_X86_TRAPS_H */
@@ -34,8 +34,6 @@ static inline cycles_t get_cycles(void)

static __always_inline cycles_t vget_cycles(void)
{
        cycles_t cycles;

        /*
         * We only do VDSOs on TSC capable CPUs, so this shouldn't
         * access boot_cpu_data (which is not VDSO-safe):
@@ -44,11 +42,7 @@ static __always_inline cycles_t vget_cycles(void)
        if (!cpu_has_tsc)
                return 0;
#endif
        rdtsc_barrier();
        cycles = (cycles_t)__native_read_tsc();
        rdtsc_barrier();

        return cycles;
        return (cycles_t)__native_read_tsc();
}

extern void tsc_init(void);
@@ -352,14 +352,14 @@ do { \

#define __put_user_nocheck(x, ptr, size) \
({ \
        long __pu_err; \
        int __pu_err; \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
        __pu_err; \
})

#define __get_user_nocheck(x, ptr, size) \
({ \
        long __gu_err; \
        int __gu_err; \
        unsigned long __gu_val; \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
        (x) = (__force __typeof__(*(ptr)))__gu_val; \
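One consequence of the long-to-int narrowing above: the error cookie produced by these macros is only ever 0 or -EFAULT, so an int holds it comfortably. A hypothetical in-kernel caller (illustrative only, not part of this patch) looks the same either way:

/* Fetch one word from userspace without an access_ok() check;
 * the caller is assumed to have validated uaddr already. */
static int fetch_user_word(unsigned int __user *uaddr, unsigned int *out)
{
        unsigned int val;
        int err = __get_user_nocheck(val, uaddr, sizeof(val));

        if (err)
                return err;     /* -EFAULT on a faulting access */
        *out = val;
        return 0;
}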
@@ -32,13 +32,18 @@
enum uv_bios_cmd {
        UV_BIOS_COMMON,
        UV_BIOS_GET_SN_INFO,
        UV_BIOS_FREQ_BASE
        UV_BIOS_FREQ_BASE,
        UV_BIOS_WATCHLIST_ALLOC,
        UV_BIOS_WATCHLIST_FREE,
        UV_BIOS_MEMPROTECT,
        UV_BIOS_GET_PARTITION_ADDR
};

/*
 * Status values returned from a BIOS call.
 */
enum {
        BIOS_STATUS_MORE_PASSES = 1,
        BIOS_STATUS_SUCCESS = 0,
        BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
        BIOS_STATUS_EINVAL = -EINVAL,
@@ -71,6 +76,21 @@ union partition_info_u {
        };
};

union uv_watchlist_u {
        u64 val;
        struct {
                u64 blade : 16,
                    size : 32,
                    filler : 16;
        };
};

enum uv_memprotect {
        UV_MEMPROT_RESTRICT_ACCESS,
        UV_MEMPROT_ALLOW_AMO,
        UV_MEMPROT_ALLOW_RW
};

/*
 * bios calls have 6 parameters
 */
@@ -80,14 +100,20 @@ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);

extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
extern s64 uv_bios_freq_base(u64, u64 *);
extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int,
                                      unsigned long *);
extern int uv_bios_mq_watchlist_free(int, int);
extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *);

extern void uv_bios_init(void);

extern unsigned long sn_rtc_cycles_per_second;
extern int uv_type;
extern long sn_partition_id;
extern long uv_coherency_id;
extern long uv_region_size;
#define partition_coherence_id() (uv_coherency_id)
extern long sn_coherency_id;
extern long sn_region_size;
#define partition_coherence_id() (sn_coherency_id)

extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
@@ -113,25 +113,37 @@
 */
#define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_NODES * 2)

struct uv_scir_s {
        struct timer_list timer;
        unsigned long offset;
        unsigned long last;
        unsigned long idle_on;
        unsigned long idle_off;
        unsigned char state;
        unsigned char enabled;
};

/*
 * The following defines attributes of the HUB chip. These attributes are
 * frequently referenced and are kept in the per-cpu data areas of each cpu.
 * They are kept together in a struct to minimize cache misses.
 */
struct uv_hub_info_s {
        unsigned long global_mmr_base;
        unsigned long gpa_mask;
        unsigned long gnode_upper;
        unsigned long lowmem_remap_top;
        unsigned long lowmem_remap_base;
        unsigned short pnode;
        unsigned short pnode_mask;
        unsigned short coherency_domain_number;
        unsigned short numa_blade_id;
        unsigned char blade_processor_id;
        unsigned char m_val;
        unsigned char n_val;
        struct uv_scir_s scir;
};

DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
@@ -163,6 +175,30 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);

#define UV_APIC_PNODE_SHIFT 6

/* Local Bus from cpu's perspective */
#define LOCAL_BUS_BASE 0x1c00000
#define LOCAL_BUS_SIZE (4 * 1024 * 1024)

/*
 * System Controller Interface Reg
 *
 * Note there are NO leds on a UV system. This register is only
 * used by the system controller to monitor system-wide operation.
 * There are 64 regs per node. With Nehalem cpus (2 cores per node,
 * 8 cpus per core, 2 threads per cpu) there are 32 cpu threads on
 * a node.
 *
 * The window is located at the top of ACPI MMR space.
 */
#define SCIR_WINDOW_COUNT 64
#define SCIR_LOCAL_MMR_BASE (LOCAL_BUS_BASE + \
                             LOCAL_BUS_SIZE - \
                             SCIR_WINDOW_COUNT)

#define SCIR_CPU_HEARTBEAT 0x01 /* timer interrupt */
#define SCIR_CPU_ACTIVITY 0x02 /* not idle */
#define SCIR_CPU_HB_INTERVAL (HZ) /* once per second */
/*
 * Macros for converting between kernel virtual addresses, socket local physical
 * addresses, and UV global physical addresses.
@@ -174,7 +210,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
{
        if (paddr < uv_hub_info->lowmem_remap_top)
                paddr += uv_hub_info->lowmem_remap_base;
                paddr |= uv_hub_info->lowmem_remap_base;
        return paddr | uv_hub_info->gnode_upper;
}

@@ -182,19 +218,7 @@ static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
/* socket virtual --> UV global physical address */
static inline unsigned long uv_gpa(void *v)
{
        return __pa(v) | uv_hub_info->gnode_upper;
}

/* socket virtual --> UV global physical address */
static inline void *uv_vgpa(void *v)
{
        return (void *)uv_gpa(v);
}

/* UV global physical address --> socket virtual */
static inline void *uv_va(unsigned long gpa)
{
        return __va(gpa & uv_hub_info->gpa_mask);
        return uv_soc_phys_ram_to_gpa(__pa(v));
}

/* pnode, offset --> socket virtual */
@@ -277,6 +301,16 @@ static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
        *uv_local_mmr_address(offset) = val;
}

static inline unsigned char uv_read_local_mmr8(unsigned long offset)
{
        return *((unsigned char *)uv_local_mmr_address(offset));
}

static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
{
        *((unsigned char *)uv_local_mmr_address(offset)) = val;
}

/*
 * Structures and definitions for converting between cpu, node, pnode, and blade
 * numbers.
@@ -351,5 +385,20 @@ static inline int uv_num_possible_blades(void)
        return uv_possible_blades;
}

#endif /* _ASM_X86_UV_UV_HUB_H */
/* Update SCIR state */
static inline void uv_set_scir_bits(unsigned char value)
{
        if (uv_hub_info->scir.state != value) {
                uv_hub_info->scir.state = value;
                uv_write_local_mmr8(uv_hub_info->scir.offset, value);
        }
}
static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
{
        if (uv_cpu_hub_info(cpu)->scir.state != value) {
                uv_cpu_hub_info(cpu)->scir.state = value;
                uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value);
        }
}

#endif /* _ASM_X86_UV_UV_HUB_H */
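The SCIR helpers above cache the last value written and touch the MMR only on change. A hypothetical per-cpu heartbeat (illustrative only; the name and is_idle parameter are invented, not from this patch) combines the SCIR_CPU_* bits in the way the defines suggest:

static void uv_example_heartbeat(int cpu, int is_idle)
{
        unsigned char bits = uv_cpu_hub_info(cpu)->scir.state;

        bits ^= SCIR_CPU_HEARTBEAT;             /* toggle heartbeat bit */
        if (is_idle)
                bits &= ~SCIR_CPU_ACTIVITY;     /* cpu is idle */
        else
                bits |= SCIR_CPU_ACTIVITY;      /* cpu is busy */

        uv_set_cpu_scir_bits(cpu, bits);        /* writes MMR only on change */
}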
arch/x86/include/asm/vmware.h (new file, 27 lines)
@@ -0,0 +1,27 @@
/*
 * Copyright (C) 2008, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#ifndef ASM_X86__VMWARE_H
#define ASM_X86__VMWARE_H

extern unsigned long vmware_get_tsc_khz(void);
extern int vmware_platform(void);
extern void vmware_set_feature_bits(struct cpuinfo_x86 *c);

#endif
@@ -33,8 +33,14 @@
#ifndef _ASM_X86_XEN_HYPERCALL_H
#define _ASM_X86_XEN_HYPERCALL_H

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
@@ -33,39 +33,10 @@
#ifndef _ASM_X86_XEN_HYPERVISOR_H
#define _ASM_X86_XEN_HYPERVISOR_H

#include <linux/types.h>
#include <linux/kernel.h>

#include <xen/interface/xen.h>
#include <xen/interface/version.h>

#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/desc.h>
#if defined(__i386__)
# ifdef CONFIG_X86_PAE
#  include <asm-generic/pgtable-nopud.h>
# else
#  include <asm-generic/pgtable-nopmd.h>
# endif
#endif
#include <asm/xen/hypercall.h>

/* arch/i386/kernel/setup.c */
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;

/* arch/i386/mach-xen/evtchn.c */
/* Force a proper event-channel callback from Xen. */
extern void force_evtchn_callback(void);

/* Turn jiffies into Xen system time. */
u64 jiffies_to_st(unsigned long jiffies);


#define MULTI_UVMFLAGS_INDEX 3
#define MULTI_UVMDOMID_INDEX 4

enum xen_domain_type {
        XEN_NATIVE,
        XEN_PV_DOMAIN,
@@ -74,9 +45,15 @@ enum xen_domain_type {

extern enum xen_domain_type xen_domain_type;

#ifdef CONFIG_XEN
#define xen_domain() (xen_domain_type != XEN_NATIVE)
#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN)
#else
#define xen_domain() (0)
#endif

#define xen_pv_domain() (xen_domain() && xen_domain_type == XEN_PV_DOMAIN)
#define xen_hvm_domain() (xen_domain() && xen_domain_type == XEN_HVM_DOMAIN)

#define xen_initial_domain() (xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN)
#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)

#endif /* _ASM_X86_XEN_HYPERVISOR_H */
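The reworked predicates make xen_domain() the single gate for the others, so shared code can use them unconditionally even when CONFIG_XEN is off. A hypothetical init-time check (illustrative only, not part of this patch):

static int __init example_xen_init(void)
{
        if (!xen_domain())
                return -ENODEV;         /* bare metal or non-Xen hypervisor */

        if (xen_pv_domain())
                pr_info("running as a Xen PV guest\n");
        else if (xen_hvm_domain())
                pr_info("running as a Xen HVM guest\n");

        return 0;
}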
@@ -1,11 +1,16 @@
#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/features.h>

/* Xen machine address */