Merge branch 'percpu-for-linus' into percpu-for-next
Conflicts:
	arch/sparc/kernel/smp_64.c
	arch/x86/kernel/cpu/perf_counter.c
	arch/x86/kernel/setup_percpu.c
	drivers/cpufreq/cpufreq_ondemand.c
	mm/percpu.c

Conflicts in core and arch percpu codes are mostly from commit
ed78e1e078dd44249f88b1dd8c76dafb39567161 which substituted many
num_possible_cpus() with nr_cpu_ids.  As the for-next branch has moved
all the first chunk allocators into mm/percpu.c, the changes are moved
from arch code to mm/percpu.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
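For context on the substitution mentioned above: num_possible_cpus() counts the bits set in cpu_possible_mask, while nr_cpu_ids is the highest possible CPU number plus one. When the possible map is sparse the two differ, and any table indexed by CPU number must be sized by nr_cpu_ids. A small self-contained sketch (illustrative only, not code from this merge) of the distinction:

	#include <stdio.h>

	/* Toy model of a sparse cpu_possible_mask: CPUs 0 and 3 are possible. */
	static const int cpu_possible[4] = { 1, 0, 0, 1 };

	int main(void)
	{
		int count = 0, highest = -1;

		for (int cpu = 0; cpu < 4; cpu++) {
			if (cpu_possible[cpu]) {
				count++;
				highest = cpu;
			}
		}

		/* num_possible_cpus() counts set bits: 2 here. */
		printf("num_possible_cpus() -> %d\n", count);
		/* nr_cpu_ids is the highest possible CPU id + 1: 4 here; a
		 * per-CPU array indexed by CPU number needs this many slots. */
		printf("nr_cpu_ids          -> %d\n", highest + 1);
		return 0;
	}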
@@ -19,7 +19,10 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)		((v)->counter)
+static inline int atomic_read(const atomic_t *v)
+{
+	return v->counter;
+}

 /**
  * atomic_set - set atomic variable
@@ -28,7 +31,10 @@
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v, i)	(((v)->counter) = (i))
+static inline void atomic_set(atomic_t *v, int i)
+{
+	v->counter = i;
+}

 /**
  * atomic_add - add integer to atomic variable
@@ -200,8 +206,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	return atomic_add_return(-i, v);
 }

-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+	return xchg(&v->counter, new);
+}

 /**
  * atomic_add_unless - add unless the number is already a given value
@@ -250,45 +263,12 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 /* An 64bit atomic type */

 typedef struct {
-	unsigned long long counter;
+	u64 __aligned(8) counter;
 } atomic64_t;

 #define ATOMIC64_INIT(val)	{ (val) }

-/**
- * atomic64_read - read atomic64 variable
- * @ptr: pointer of type atomic64_t
- *
- * Atomically reads the value of @v.
- * Doesn't imply a read memory barrier.
- */
-#define __atomic64_read(ptr)	((ptr)->counter)
-
-static inline unsigned long long
-cmpxchg8b(unsigned long long *ptr, unsigned long long old, unsigned long long new)
-{
-	asm volatile(
-
-		LOCK_PREFIX "cmpxchg8b (%[ptr])\n"
-
-		     : "=A" (old)
-
-		     : [ptr] "D" (ptr),
-		       "A" (old),
-		       "b" (ll_low(new)),
-		       "c" (ll_high(new))
-
-		     : "memory");
-
-	return old;
-}
-
-static inline unsigned long long
-atomic64_cmpxchg(atomic64_t *ptr, unsigned long long old_val,
-		 unsigned long long new_val)
-{
-	return cmpxchg8b(&ptr->counter, old_val, new_val);
-}
+extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);

 /**
  * atomic64_xchg - xchg atomic64 variable
@@ -298,18 +278,7 @@ atomic64_cmpxchg(atomic64_t *ptr, unsigned long long old_val,
  * Atomically xchgs the value of @ptr to @new_val and returns
  * the old value.
  */
-
-static inline unsigned long long
-atomic64_xchg(atomic64_t *ptr, unsigned long long new_val)
-{
-	unsigned long long old_val;
-
-	do {
-		old_val = atomic_read(ptr);
-	} while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val);
-
-	return old_val;
-}
+extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);

 /**
  * atomic64_set - set atomic64 variable
@@ -318,10 +287,7 @@ atomic64_xchg(atomic64_t *ptr, unsigned long long new_val)
  *
  * Atomically sets the value of @ptr to @new_val.
  */
-static inline void atomic64_set(atomic64_t *ptr, unsigned long long new_val)
-{
-	atomic64_xchg(ptr, new_val);
-}
+extern void atomic64_set(atomic64_t *ptr, u64 new_val);

 /**
  * atomic64_read - read atomic64 variable
@@ -329,17 +295,30 @@ static inline void atomic64_set(atomic64_t *ptr, unsigned long long new_val)
  *
  * Atomically reads the value of @ptr and returns it.
  */
-static inline unsigned long long atomic64_read(atomic64_t *ptr)
+static inline u64 atomic64_read(atomic64_t *ptr)
 {
-	unsigned long long curr_val;
+	u64 res;

-	do {
-		curr_val = __atomic64_read(ptr);
-	} while (atomic64_cmpxchg(ptr, curr_val, curr_val) != curr_val);
+	/*
+	 * Note, we inline this atomic64_t primitive because
+	 * it only clobbers EAX/EDX and leaves the others
+	 * untouched. We also (somewhat subtly) rely on the
+	 * fact that cmpxchg8b returns the current 64-bit value
+	 * of the memory location we are touching:
+	 */
+	asm volatile(
+		"mov %%ebx, %%eax\n\t"
+		"mov %%ecx, %%edx\n\t"
+		LOCK_PREFIX "cmpxchg8b %1\n"
+			: "=&A" (res)
+			: "m" (*ptr)
+		);

-	return curr_val;
+	return res;
 }

+extern u64 atomic64_read(atomic64_t *ptr);
+
 /**
  * atomic64_add_return - add and return
  * @delta: integer value to add
@@ -347,34 +326,14 @@ static inline unsigned long long atomic64_read(atomic64_t *ptr)
  *
  * Atomically adds @delta to @ptr and returns @delta + *@ptr
  */
-static inline unsigned long long
-atomic64_add_return(unsigned long long delta, atomic64_t *ptr)
-{
-	unsigned long long old_val, new_val;
+extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);

-	do {
-		old_val = atomic_read(ptr);
-		new_val = old_val + delta;
-
-	} while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val);
-
-	return new_val;
-}
-
-static inline long atomic64_sub_return(unsigned long long delta, atomic64_t *ptr)
-{
-	return atomic64_add_return(-delta, ptr);
-}
-
-static inline long atomic64_inc_return(atomic64_t *ptr)
-{
-	return atomic64_add_return(1, ptr);
-}
-
-static inline long atomic64_dec_return(atomic64_t *ptr)
-{
-	return atomic64_sub_return(1, ptr);
-}
+/*
+ * Other variants with different arithmetic operators:
+ */
+extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
+extern u64 atomic64_inc_return(atomic64_t *ptr);
+extern u64 atomic64_dec_return(atomic64_t *ptr);

 /**
  * atomic64_add - add integer to atomic64 variable
@@ -383,10 +342,7 @@ static inline long atomic64_dec_return(atomic64_t *ptr)
  *
  * Atomically adds @delta to @ptr.
  */
-static inline void atomic64_add(unsigned long long delta, atomic64_t *ptr)
-{
-	atomic64_add_return(delta, ptr);
-}
+extern void atomic64_add(u64 delta, atomic64_t *ptr);

 /**
  * atomic64_sub - subtract the atomic64 variable
@@ -395,10 +351,7 @@ static inline void atomic64_add(unsigned long long delta, atomic64_t *ptr)
  *
  * Atomically subtracts @delta from @ptr.
  */
-static inline void atomic64_sub(unsigned long long delta, atomic64_t *ptr)
-{
-	atomic64_add(-delta, ptr);
-}
+extern void atomic64_sub(u64 delta, atomic64_t *ptr);

 /**
  * atomic64_sub_and_test - subtract value from variable and test result
@@ -409,13 +362,7 @@ static inline void atomic64_sub(unsigned long long delta, atomic64_t *ptr)
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline int
-atomic64_sub_and_test(unsigned long long delta, atomic64_t *ptr)
-{
-	unsigned long long old_val = atomic64_sub_return(delta, ptr);
-
-	return old_val == 0;
-}
+extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);

 /**
  * atomic64_inc - increment atomic64 variable
@@ -423,10 +370,7 @@ atomic64_sub_and_test(unsigned long long delta, atomic64_t *ptr)
  *
  * Atomically increments @ptr by 1.
  */
-static inline void atomic64_inc(atomic64_t *ptr)
-{
-	atomic64_add(1, ptr);
-}
+extern void atomic64_inc(atomic64_t *ptr);

 /**
  * atomic64_dec - decrement atomic64 variable
@@ -434,10 +378,7 @@ static inline void atomic64_inc(atomic64_t *ptr)
  *
  * Atomically decrements @ptr by 1.
  */
-static inline void atomic64_dec(atomic64_t *ptr)
-{
-	atomic64_sub(1, ptr);
-}
+extern void atomic64_dec(atomic64_t *ptr);

 /**
  * atomic64_dec_and_test - decrement and test
@@ -447,10 +388,7 @@ static inline void atomic64_dec(atomic64_t *ptr)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline int atomic64_dec_and_test(atomic64_t *ptr)
-{
-	return atomic64_sub_and_test(1, ptr);
-}
+extern int atomic64_dec_and_test(atomic64_t *ptr);

 /**
  * atomic64_inc_and_test - increment and test
@@ -460,10 +398,7 @@ static inline int atomic64_dec_and_test(atomic64_t *ptr)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline int atomic64_inc_and_test(atomic64_t *ptr)
-{
-	return atomic64_sub_and_test(-1, ptr);
-}
+extern int atomic64_inc_and_test(atomic64_t *ptr);

 /**
  * atomic64_add_negative - add and test if negative
@@ -474,13 +409,7 @@ static inline int atomic64_inc_and_test(atomic64_t *ptr)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline int
-atomic64_add_negative(unsigned long long delta, atomic64_t *ptr)
-{
-	long long old_val = atomic64_add_return(delta, ptr);
-
-	return old_val < 0;
-}
+extern int atomic64_add_negative(u64 delta, atomic64_t *ptr);

 #include <asm-generic/atomic-long.h>
 #endif /* _ASM_X86_ATOMIC_32_H */
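The atomic64_* helpers removed above all share one compare-and-swap retry idiom: reread the current value, compute the new one, and retry until the cmpxchg observes no concurrent change. A minimal user-space sketch of the same idiom (illustrative only; it uses GCC's __sync_val_compare_and_swap builtin in place of the kernel's cmpxchg8b wrapper, and my_atomic64 is a hypothetical stand-in for atomic64_t):

	#include <stdio.h>

	typedef struct {
		unsigned long long counter;
	} my_atomic64;	/* illustrative stand-in for atomic64_t */

	/* Retry loop: reread until the CAS sees no concurrent change. */
	static unsigned long long my_atomic64_add_return(unsigned long long delta,
							 my_atomic64 *ptr)
	{
		unsigned long long old_val, new_val;

		do {
			old_val = ptr->counter;
			new_val = old_val + delta;
		} while (__sync_val_compare_and_swap(&ptr->counter,
						     old_val, new_val) != old_val);

		return new_val;
	}

	int main(void)
	{
		my_atomic64 v = { 40 };

		printf("%llu\n", my_atomic64_add_return(2, &v));	/* prints 42 */
		return 0;
	}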
@@ -18,7 +18,10 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)		((v)->counter)
+static inline int atomic_read(const atomic_t *v)
+{
+	return v->counter;
+}

 /**
  * atomic_set - set atomic variable
@@ -27,7 +30,10 @@
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v, i)	(((v)->counter) = (i))
+static inline void atomic_set(atomic_t *v, int i)
+{
+	v->counter = i;
+}

 /**
  * atomic_add - add integer to atomic variable
@@ -192,7 +198,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
  * Atomically reads the value of @v.
  * Doesn't imply a read memory barrier.
  */
-#define atomic64_read(v)	((v)->counter)
+static inline long atomic64_read(const atomic64_t *v)
+{
+	return v->counter;
+}

 /**
  * atomic64_set - set atomic64 variable
@@ -201,7 +210,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic64_set(v, i)	(((v)->counter) = (i))
+static inline void atomic64_set(atomic64_t *v, long i)
+{
+	v->counter = i;
+}

 /**
  * atomic64_add - add integer to atomic64 variable
@@ -355,11 +367,25 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 #define atomic64_inc_return(v)	(atomic64_add_return(1, (v)))
 #define atomic64_dec_return(v)	(atomic64_sub_return(1, (v)))

-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
+
+static inline long atomic64_xchg(atomic64_t *v, long new)
+{
+	return xchg(&v->counter, new);
+}

-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
+
+static inline long atomic_xchg(atomic_t *v, int new)
+{
+	return xchg(&v->counter, new);
+}

 /**
  * atomic_add_unless - add unless the number is a given value
@@ -33,7 +33,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
 	efi_call_virt(f, a1, a2, a3, a4, a5, a6)

-#define efi_ioremap(addr, size)			ioremap_cache(addr, size)
+#define efi_ioremap(addr, size, type)		ioremap_cache(addr, size)

 #else /* !CONFIG_X86_32 */

@@ -84,7 +84,8 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
 	efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))

-extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size);
+extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
+				 u32 type);

 #endif /* CONFIG_X86_32 */

@@ -111,12 +111,9 @@ enum fixed_addresses {
 #ifdef CONFIG_PARAVIRT
 	FIX_PARAVIRT_BOOTMAP,
 #endif
-	FIX_TEXT_POKE0,	/* reserve 2 pages for text_poke() */
-	FIX_TEXT_POKE1,
+	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
+	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
 	__end_of_permanent_fixed_addresses,
-#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
-	FIX_OHCI1394_BASE,
-#endif
 	/*
 	 * 256 temporary boot-time mappings, used by early_ioremap(),
 	 * before ioremap() is functional.
@@ -129,6 +126,9 @@ enum fixed_addresses {
 	FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
 			(__end_of_permanent_fixed_addresses & 255),
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
+	FIX_OHCI1394_BASE,
+#endif
 #ifdef CONFIG_X86_32
 	FIX_WP_TEST,
 #endif
@@ -161,6 +161,7 @@ extern int io_apic_set_pci_routing(struct device *dev, int irq,
 				   struct io_apic_irq_attr *irq_attr);
 extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 extern void ioapic_init_mappings(void);
+extern void ioapic_insert_resources(void);

 extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
 extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
@@ -180,6 +181,7 @@ extern void ioapic_write_entry(int apic, int pin,
 #define io_apic_assign_pci_irqs 0
 static const int timer_through_8259 = 0;
 static inline void ioapic_init_mappings(void)	{ }
+static inline void ioapic_insert_resources(void) { }

 static inline void probe_nr_irqs_gsi(void)	{ }
 #endif
@@ -12,9 +12,15 @@ static inline unsigned long native_save_fl(void)
 {
 	unsigned long flags;

+	/*
+	 * Note: this needs to be "=r" not "=rm", because we have the
+	 * stack offset from what gcc expects at the time the "pop" is
+	 * executed, and so a memory reference with respect to the stack
+	 * would end up using the wrong address.
+	 */
 	asm volatile("# __raw_save_flags\n\t"
 		     "pushf ; pop %0"
-		     : "=g" (flags)
+		     : "=r" (flags)
 		     : /* no input */
 		     : "memory");

@@ -17,8 +17,7 @@
 /* Pages for switcher itself, then two pages per cpu */
 #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)

-/* We map at -4M (-2M when PAE is activated) for ease of mapping
- * into the guest (one PTE page). */
+/* We map at -4M (-2M for PAE) for ease of mapping (one PTE page). */
 #ifdef CONFIG_X86_PAE
 #define SWITCHER_ADDR 0xFFE00000
 #else
@@ -30,27 +30,27 @@
 #include <asm/hw_irq.h>
 #include <asm/kvm_para.h>

-/*G:031 But first, how does our Guest contact the Host to ask for privileged
+/*G:030
+ * But first, how does our Guest contact the Host to ask for privileged
  * operations?  There are two ways: the direct way is to make a "hypercall",
  * to make requests of the Host Itself.
  *
- * We use the KVM hypercall mechanism. Seventeen hypercalls are
- * available: the hypercall number is put in the %eax register, and the
- * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
- * If a return value makes sense, it's returned in %eax.
+ * We use the KVM hypercall mechanism, though completely different hypercall
+ * numbers. Seventeen hypercalls are available: the hypercall number is put in
+ * the %eax register, and the arguments (when required) are placed in %ebx,
+ * %ecx, %edx and %esi.  If a return value makes sense, it's returned in %eax.
  *
  * Grossly invalid calls result in Sudden Death at the hands of the vengeful
  * Host, rather than returning failure. This reflects Winston Churchill's
- * definition of a gentleman: "someone who is only rude intentionally". */
-/*:*/
+ * definition of a gentleman: "someone who is only rude intentionally".
+:*/

 /* Can't use our min() macro here: needs to be a constant */
 #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)

 #define LHCALL_RING_SIZE 64
 struct hcall_args {
-	/* These map directly onto eax, ebx, ecx, edx and esi
-	 * in struct lguest_regs */
+	/* These map directly onto eax/ebx/ecx/edx/esi in struct lguest_regs */
 	unsigned long arg0, arg1, arg2, arg3, arg4;
 };

@@ -246,10 +246,6 @@
 #define MSR_IA32_MISC_ENABLE_TURBO_DISABLE	(1ULL << 38)
 #define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE	(1ULL << 39)

-/* Intel Model 6 */
-#define MSR_P6_EVNTSEL0			0x00000186
-#define MSR_P6_EVNTSEL1			0x00000187
-
 /* P4/Xeon+ specific */
 #define MSR_IA32_MCG_EAX		0x00000180
 #define MSR_IA32_MCG_EBX		0x00000181
@@ -72,7 +72,6 @@ void lapic_watchdog_stop(void);
 int lapic_watchdog_init(unsigned nmi_hz);
 int lapic_wd_event(unsigned nmi_hz);
 unsigned lapic_adjust_nmi_hz(unsigned hz);
-int lapic_watchdog_ok(void);
 void disable_lapic_nmi_watchdog(void);
 void enable_lapic_nmi_watchdog(void);
 void stop_nmi(void);
@@ -46,7 +46,13 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
 	__free_page(pte);
 }

-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
+extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+				  unsigned long address)
+{
+	___pte_free_tlb(tlb, pte);
+}

 static inline void pmd_populate_kernel(struct mm_struct *mm,
 				       pmd_t *pmd, pte_t *pte)
@@ -78,7 +84,13 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	free_page((unsigned long)pmd);
 }

-extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
+extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+				  unsigned long adddress)
+{
+	___pmd_free_tlb(tlb, pmd);
+}

 #ifdef CONFIG_X86_PAE
 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
@@ -108,7 +120,14 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 	free_page((unsigned long)pud);
 }

-extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
+extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
+
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+				  unsigned long address)
+{
+	___pud_free_tlb(tlb, pud);
+}

 #endif	/* PAGETABLE_LEVELS > 3 */
 #endif	/* PAGETABLE_LEVELS > 2 */

@@ -302,4 +302,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()

+/* The {read|write|spin}_lock() on x86 are full memory barriers. */
+static inline void smp_mb__after_lock(void) { }
+#define ARCH_HAS_SMP_MB_AFTER_LOCK
+
 #endif /* _ASM_X86_SPINLOCK_H */
@@ -3,6 +3,8 @@

 extern int kstack_depth_to_print;

+int x86_is_stack_id(int id, char *name);
+
 /* Generic stack tracer with callbacks */

 struct stacktrace_ops {
@@ -49,7 +49,7 @@ struct thread_info {
 	.exec_domain	= &default_exec_domain,	\
 	.flags		= 0,			\
 	.cpu		= 0,			\
-	.preempt_count	= 1,			\
+	.preempt_count	= INIT_PREEMPT_COUNT,	\
 	.addr_limit	= KERNEL_DS,		\
 	.restart_block	= {			\
 		.fn = do_no_restart_syscall,	\
@@ -212,9 +212,9 @@ extern int __get_user_bad(void);
 		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 #else
 #define __put_user_asm_u64(x, ptr, retval, errret) \
-	__put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
+	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
 #define __put_user_asm_ex_u64(x, addr)	\
-	__put_user_asm_ex(x, addr, "q", "", "Zr")
+	__put_user_asm_ex(x, addr, "q", "", "er")
 #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
 #endif

@@ -88,11 +88,11 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 			       ret, "l", "k", "ir", 4);
 		return ret;
 	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
-			      ret, "q", "", "ir", 8);
+			      ret, "q", "", "er", 8);
 		return ret;
 	case 10:
 		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
-			       ret, "q", "", "ir", 10);
+			       ret, "q", "", "er", 10);
 		if (unlikely(ret))
 			return ret;
 		asm("":::"memory");
@@ -101,12 +101,12 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 		return ret;
 	case 16:
 		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
-			       ret, "q", "", "ir", 16);
+			       ret, "q", "", "er", 16);
 		if (unlikely(ret))
 			return ret;
 		asm("":::"memory");
 		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
-			       ret, "q", "", "ir", 8);
+			       ret, "q", "", "er", 8);
 		return ret;
 	default:
 		return copy_user_generic((__force void *)dst, src, size);
@@ -157,7 +157,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 					 ret, "q", "", "=r", 8);
 			if (likely(!ret))
 				__put_user_asm(tmp, (u64 __user *)dst,
-					       ret, "q", "", "ir", 8);
+					       ret, "q", "", "er", 8);
 			return ret;
 		}
 	default:
@@ -175,7 +175,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 #define UV_GLOBAL_MMR32_PNODE_BITS(p)	((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))

 #define UV_GLOBAL_MMR64_PNODE_BITS(p)					\
-	((unsigned long)(UV_PNODE_TO_GNODE(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)
+	(((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)

 #define UV_APIC_PNODE_SHIFT 6

@@ -327,6 +327,7 @@ struct uv_blade_info {
 	unsigned short	nr_possible_cpus;
 	unsigned short	nr_online_cpus;
 	unsigned short	pnode;
+	short		memory_nid;
 };
 extern struct uv_blade_info *uv_blade_info;
 extern short *uv_node_to_blade;
@@ -363,6 +364,12 @@ static inline int uv_blade_to_pnode(int bid)
 	return uv_blade_info[bid].pnode;
 }

+/* Nid of memory node on blade. -1 if no blade-local memory */
+static inline int uv_blade_to_memory_nid(int bid)
+{
+	return uv_blade_info[bid].memory_nid;
+}
+
 /* Determine the number of possible cpus on a blade */
 static inline int uv_blade_nr_possible_cpus(int bid)
 {