Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The tree got pretty big in this development cycle, but the net effect
  is pretty good:

    115 files changed, 673 insertions(+), 1522 deletions(-)

  The main changes were:

   - Rework and generalize the mutex code to remove per arch mutex
     primitives. (Peter Zijlstra)

   - Add vCPU preemption support: add an interface to query the
     preemption status of vCPUs and use it in locking primitives - this
     optimizes paravirt performance. (Pan Xinhui, Juergen Gross,
     Christian Borntraeger)

   - Introduce cpu_relax_yield() and remove cpu_relax_lowlatency() to
     clean up and improve the s390 lock yielding machinery and its core
     kernel impact. (Christian Borntraeger)

   - Micro-optimize mutexes some more. (Waiman Long)

   - Reluctantly add the to-be-deprecated mutex_trylock_recursive()
     interface on a temporary basis, to give the DRM code more time to
     get rid of its locking hacks. Any other users will be NAK-ed on
     sight. (We turned off the deprecation warning for the time being
     to not pollute the build log.) (Peter Zijlstra)

   - Improve the rtmutex code a bit, in light of recent long lived
     bugs/races. (Thomas Gleixner)

   - Misc fixes, cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
  x86/paravirt: Fix bool return type for PVOP_CALL()
  x86/paravirt: Fix native_patch()
  locking/ww_mutex: Use relaxed atomics
  locking/rtmutex: Explain locking rules for rt_mutex_proxy_unlock()/init_proxy_locked()
  locking/rtmutex: Get rid of RT_MUTEX_OWNER_MASKALL
  x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()
  locking/mutex: Break out of expensive busy-loop on {mutex,rwsem}_spin_on_owner() when owner vCPU is preempted
  locking/osq: Break out of spin-wait busy waiting loop for a preempted vCPU in osq_lock()
  Documentation/virtual/kvm: Support the vCPU preemption check
  x86/xen: Support the vCPU preemption check
  x86/kvm: Support the vCPU preemption check
  x86/kvm: Support the vCPU preemption check
  kvm: Introduce kvm_write_guest_offset_cached()
  locking/core, x86/paravirt: Implement vcpu_is_preempted(cpu) for KVM and Xen guests
  locking/spinlocks, s390: Implement vcpu_is_preempted(cpu)
  locking/core, powerpc: Implement vcpu_is_preempted(cpu)
  sched/core: Introduce the vcpu_is_preempted(cpu) interface
  sched/wake_q: Rename WAKE_Q to DEFINE_WAKE_Q
  locking/core: Provide common cpu_relax_yield() definition
  locking/mutex: Don't mark mutex_trylock_recursive() as deprecated, temporarily
  ...
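The common thread in the vCPU-preemption commits above is the new vcpu_is_preempted(cpu) hook: paravirt-aware architectures report whether a guest vCPU currently holds a physical CPU, and the generic spin-wait loops (mutex/rwsem optimistic spinning, osq_lock()) stop busy-waiting once the lock owner's vCPU has been scheduled out. The snippet below is a minimal, self-contained sketch of that loop shape, not kernel source; the stub_* helpers and the default-false fallback are assumptions made only to keep the example compilable.

```c
#include <stdbool.h>
#include <stdio.h>

/* Generic fallback: architectures that can ask the hypervisor
 * (KVM, Xen, s390, powerpc in this series) override this. */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu) false
#endif

/* Trivial stand-ins for kernel helpers, only so the sketch compiles. */
static bool stub_owner_running(int owner_cpu) { (void)owner_cpu; return true; }
static bool stub_need_resched(void) { static int calls; return ++calls > 3; }
static void stub_cpu_relax(void) { /* cpu_relax() in the kernel */ }

/* Shape of the {mutex,rwsem}_spin_on_owner() change: return false
 * (stop optimistic spinning) as soon as spinning cannot pay off. */
static bool spin_on_owner(int owner_cpu)
{
	while (stub_owner_running(owner_cpu)) {
		if (stub_need_resched())
			return false;

		/* New in this cycle: bail out when the lock owner's vCPU
		 * has been scheduled out by the hypervisor. */
		if (vcpu_is_preempted(owner_cpu))
			return false;

		stub_cpu_relax();
	}
	return true;
}

int main(void)
{
	printf("keep spinning: %d\n", spin_on_owner(1));
	return 0;
}
```

On bare metal the check is meant to be free: the fallback is a constant false, and the x86 patches in this series additionally patch the native pv_lock_ops.vcpu_is_preempted case down to an `xor %eax, %eax` / `xor %rax, %rax` sequence.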
@@ -1,9 +0,0 @@
/*
 * Pull in the generic implementation for the mutex fastpath.
 *
 * TODO: implement optimized primitives instead, or leave the generic
 * implementation in place, or pick the atomic_xchg() based generic
 * implementation. (see asm-generic/mutex-xchg.h for details)
 */

#include <asm-generic/mutex-dec.h>
@@ -58,7 +58,6 @@ unsigned long get_wchan(struct task_struct *p);
	((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)

#define cpu_relax()	barrier()
#define cpu_relax_lowlatency()	cpu_relax()

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
@@ -1,18 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
/*
|
||||
* xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
|
||||
* atomic dec based which can "count" any number of lock contenders.
|
||||
* This ideally needs to be fixed in core, but for now switching to dec ver.
|
||||
*/
|
||||
#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
|
||||
#include <asm-generic/mutex-dec.h>
|
||||
#else
|
||||
#include <asm-generic/mutex-xchg.h>
|
||||
#endif
|
@@ -60,15 +60,12 @@ struct task_struct;
|
||||
#ifndef CONFIG_EZNPS_MTM_EXT
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#else
|
||||
|
||||
#define cpu_relax() \
|
||||
__asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
|
||||
|
||||
#define cpu_relax_lowlatency() barrier()
|
||||
|
||||
#endif
|
||||
|
||||
#define copy_segments(tsk, mm) do { } while (0)
|
||||
|
@@ -1,21 +0,0 @@
|
||||
/*
|
||||
* arch/arm/include/asm/mutex.h
|
||||
*
|
||||
* ARM optimized mutex locking primitives
|
||||
*
|
||||
* Please look into asm-generic/mutex-xchg.h for a formal definition.
|
||||
*/
|
||||
#ifndef _ASM_MUTEX_H
|
||||
#define _ASM_MUTEX_H
|
||||
/*
|
||||
* On pre-ARMv6 hardware this results in a swp-based implementation,
|
||||
* which is the most efficient. For ARMv6+, we have exclusive memory
|
||||
* accessors and use atomic_dec to avoid the extra xchg operations
|
||||
* on the locking slowpaths.
|
||||
*/
|
||||
#if __LINUX_ARM_ARCH__ < 6
|
||||
#include <asm-generic/mutex-xchg.h>
|
||||
#else
|
||||
#include <asm-generic/mutex-dec.h>
|
||||
#endif
|
||||
#endif /* _ASM_MUTEX_H */
|
@@ -82,8 +82,6 @@ unsigned long get_wchan(struct task_struct *p);
|
||||
#define cpu_relax() barrier()
|
||||
#endif
|
||||
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#define task_pt_regs(p) \
|
||||
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
|
||||
|
||||
|
@@ -24,7 +24,6 @@ generic-y += mm-arch-hooks.h
|
||||
generic-y += mman.h
|
||||
generic-y += msgbuf.h
|
||||
generic-y += msi.h
|
||||
generic-y += mutex.h
|
||||
generic-y += poll.h
|
||||
generic-y += preempt.h
|
||||
generic-y += resource.h
|
||||
|
@@ -149,8 +149,6 @@ static inline void cpu_relax(void)
|
||||
asm volatile("yield" ::: "memory");
|
||||
}
|
||||
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Thread switching */
|
||||
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
|
||||
struct task_struct *next);
|
||||
|
@@ -1,9 +0,0 @@
|
||||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
@@ -92,7 +92,6 @@ extern struct avr32_cpuinfo boot_cpu_data;
|
||||
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
#define cpu_sync_pipeline() asm volatile("sub pc, -2" : : : "memory")
|
||||
|
||||
struct cpu_context {
|
||||
|
@@ -24,7 +24,6 @@ generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += mman.h
|
||||
generic-y += msgbuf.h
|
||||
generic-y += mutex.h
|
||||
generic-y += param.h
|
||||
generic-y += percpu.h
|
||||
generic-y += pgalloc.h
|
||||
|
@@ -92,7 +92,6 @@ unsigned long get_wchan(struct task_struct *p);
|
||||
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
|
||||
|
||||
#define cpu_relax() smp_mb()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Get the Silicon Revision of the chip */
|
||||
static inline uint32_t __pure bfin_revid(void)
|
||||
|
@@ -1,6 +0,0 @@
|
||||
#ifndef _ASM_C6X_MUTEX_H
|
||||
#define _ASM_C6X_MUTEX_H
|
||||
|
||||
#include <asm-generic/mutex-null.h>
|
||||
|
||||
#endif /* _ASM_C6X_MUTEX_H */
|
@@ -121,7 +121,6 @@ extern unsigned long get_wchan(struct task_struct *p);
|
||||
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
|
||||
|
||||
#define cpu_relax() do { } while (0)
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
extern const struct seq_operations cpuinfo_op;
|
||||
|
||||
|
@@ -1,9 +0,0 @@
|
||||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
@@ -63,7 +63,6 @@ static inline void release_thread(struct task_struct *dead_task)
|
||||
#define init_stack (init_thread_union.stack)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
void default_idle(void);
|
||||
|
||||
|
@@ -1,9 +0,0 @@
|
||||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
@@ -107,7 +107,6 @@ unsigned long get_wchan(struct task_struct *p);
|
||||
#define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* data cache prefetch */
|
||||
#define ARCH_HAS_PREFETCH
|
||||
|
@@ -1,9 +0,0 @@
|
||||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
@@ -127,7 +127,6 @@ unsigned long get_wchan(struct task_struct *p);
|
||||
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#define HARD_RESET_NOW() ({ \
|
||||
local_irq_disable(); \
|
||||
|
@@ -1,8 +0,0 @@
|
||||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
#include <asm-generic/mutex-xchg.h>
|
@@ -56,7 +56,6 @@ struct thread_struct {
|
||||
}
|
||||
|
||||
#define cpu_relax() __vmyield()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/*
|
||||
* Decides where the kernel will search for a free chunk of vm space during
|
||||
|
@@ -1,90 +0,0 @@
|
||||
/*
|
||||
* ia64 implementation of the mutex fastpath.
|
||||
*
|
||||
* Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _ASM_MUTEX_H
|
||||
#define _ASM_MUTEX_H
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the original value was not 1
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1, and call <fail_fn> if
|
||||
* it wasn't 1 originally. This function MUST leave the value lower than
|
||||
* 1 even when the "1" assertion wasn't true.
|
||||
*/
|
||||
static inline void
|
||||
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
|
||||
fail_fn(count);
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1. This function returns 0
|
||||
* if the fastpath succeeds, or -1 otherwise.
|
||||
*/
|
||||
static inline int
|
||||
__mutex_fastpath_lock_retval(atomic_t *count)
|
||||
{
|
||||
if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_unlock - try to promote the count from 0 to 1
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the original value was not 0
|
||||
*
|
||||
* Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
|
||||
* In the failure case, this function is allowed to either set the value to
|
||||
* 1, or to set it to a value lower than 1.
|
||||
*
|
||||
* If the implementation sets it to a value of lower than 1, then the
|
||||
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
|
||||
* to return 0 otherwise.
|
||||
*/
|
||||
static inline void
|
||||
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
int ret = ia64_fetchadd4_rel(count, 1);
|
||||
if (unlikely(ret < 0))
|
||||
fail_fn(count);
|
||||
}
|
||||
|
||||
#define __mutex_slowpath_needs_to_unlock() 1
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
|
||||
*
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: fallback function
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1, and return 0 (failure)
|
||||
* if it wasn't 1 originally, or return 1 (success) otherwise. This function
|
||||
* MUST leave the value lower than 1 even when the "1" assertion wasn't true.
|
||||
* Additionally, if the value was < 0 originally, this function must not leave
|
||||
* it to 0 on failure.
|
||||
*
|
||||
* If the architecture has no effective trylock variant, it should call the
|
||||
* <fail_fn> spinlock-based trylock variant unconditionally.
|
||||
*/
|
||||
static inline int
|
||||
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
|
||||
{
|
||||
if (atomic_read(count) == 1 && cmpxchg_acq(count, 1, 0) == 1)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
@@ -547,7 +547,6 @@ ia64_eoi (void)
|
||||
}
|
||||
|
||||
#define cpu_relax() ia64_hint(ia64_hint_pause)
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
static inline int
|
||||
ia64_get_irr(unsigned int vector)
|
||||
|
@@ -1,9 +0,0 @@
|
||||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
@@ -133,6 +133,5 @@ unsigned long get_wchan(struct task_struct *p);
|
||||
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#endif /* _ASM_M32R_PROCESSOR_H */
|
||||
|
@@ -20,7 +20,6 @@ generic-y += local64.h
|
||||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += mman.h
|
||||
generic-y += mutex.h
|
||||
generic-y += percpu.h
|
||||
generic-y += preempt.h
|
||||
generic-y += resource.h
|
||||
|
@@ -156,6 +156,5 @@ unsigned long get_wchan(struct task_struct *p);
|
||||
#define task_pt_regs(tsk) ((struct pt_regs *) ((tsk)->thread.esp0))
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#endif
|
||||
|
@@ -27,7 +27,6 @@ generic-y += local64.h
|
||||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += msgbuf.h
|
||||
generic-y += mutex.h
|
||||
generic-y += param.h
|
||||
generic-y += pci.h
|
||||
generic-y += percpu.h
|
||||
|
@@ -152,7 +152,6 @@ unsigned long get_wchan(struct task_struct *p);
|
||||
#define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
extern void setup_priv(void);
|
||||
|
||||
|
@@ -1 +0,0 @@
|
||||
#include <asm-generic/mutex-dec.h>
|
@@ -22,7 +22,6 @@
|
||||
extern const struct seq_operations cpuinfo_op;
|
||||
|
||||
# define cpu_relax() barrier()
|
||||
# define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#define task_pt_regs(tsk) \
|
||||
(((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
|
||||
|
@@ -9,7 +9,6 @@ generic-y += irq_work.h
|
||||
generic-y += local64.h
|
||||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += mutex.h
|
||||
generic-y += parport.h
|
||||
generic-y += percpu.h
|
||||
generic-y += preempt.h
|
||||
|
@@ -389,7 +389,6 @@ unsigned long get_wchan(struct task_struct *p);
|
||||
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/*
|
||||
* Return_address is a replacement for __builtin_return_address(count)
|
||||
|
@@ -1,16 +0,0 @@
|
||||
/* MN10300 Mutex fastpath
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public Licence
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the Licence, or (at your option) any later version.
|
||||
*
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
#include <asm-generic/mutex-null.h>
|
@@ -69,7 +69,6 @@ extern void print_cpu_info(struct mn10300_cpuinfo *);
|
||||
extern void dodgy_tsc(void);
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/*
|
||||
* User space process size: 1.75GB (default).
|
||||
|
@@ -1 +0,0 @@
|
||||
#include <asm-generic/mutex-dec.h>
|
@@ -88,7 +88,6 @@ extern unsigned long get_wchan(struct task_struct *p);
|
||||
#define KSTK_ESP(tsk) ((tsk)->thread.kregs->sp)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
|
@@ -1,27 +0,0 @@
|
||||
/*
|
||||
* OpenRISC Linux
|
||||
*
|
||||
* Linux architectural port borrowing liberally from similar works of
|
||||
* others. All original copyrights apply as per the original source
|
||||
* declaration.
|
||||
*
|
||||
* OpenRISC implementation:
|
||||
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
|
||||
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
|
||||
* et al.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
@@ -92,7 +92,6 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
|
||||
#define init_stack (init_thread_union.stack)
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* __ASM_OPENRISC_PROCESSOR_H */
|
||||
|
@@ -16,7 +16,6 @@ generic-y += local.h
|
||||
generic-y += local64.h
|
||||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += mutex.h
|
||||
generic-y += param.h
|
||||
generic-y += percpu.h
|
||||
generic-y += poll.h
|
||||
|
@@ -309,7 +309,6 @@ extern unsigned long get_wchan(struct task_struct *p);
|
||||
#define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30])
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/*
|
||||
* parisc_requires_coherency() is used to identify the combined VIPT/PIPT
|
||||
|
@@ -1,132 +0,0 @@
|
||||
/*
|
||||
* Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
|
||||
*/
|
||||
#ifndef _ASM_POWERPC_MUTEX_H
|
||||
#define _ASM_POWERPC_MUTEX_H
|
||||
|
||||
static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
|
||||
{
|
||||
int t;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"1: lwarx %0,0,%1 # mutex trylock\n\
|
||||
cmpw 0,%0,%2\n\
|
||||
bne- 2f\n"
|
||||
PPC405_ERR77(0,%1)
|
||||
" stwcx. %3,0,%1\n\
|
||||
bne- 1b"
|
||||
PPC_ACQUIRE_BARRIER
|
||||
"\n\
|
||||
2:"
|
||||
: "=&r" (t)
|
||||
: "r" (&v->counter), "r" (old), "r" (new)
|
||||
: "cc", "memory");
|
||||
|
||||
return t;
|
||||
}
|
||||
|
||||
static inline int __mutex_dec_return_lock(atomic_t *v)
|
||||
{
|
||||
int t;
|
||||
|
||||
__asm__ __volatile__(
|
||||
"1: lwarx %0,0,%1 # mutex lock\n\
|
||||
addic %0,%0,-1\n"
|
||||
PPC405_ERR77(0,%1)
|
||||
" stwcx. %0,0,%1\n\
|
||||
bne- 1b"
|
||||
PPC_ACQUIRE_BARRIER
|
||||
: "=&r" (t)
|
||||
: "r" (&v->counter)
|
||||
: "cc", "memory");
|
||||
|
||||
return t;
|
||||
}
|
||||
|
||||
static inline int __mutex_inc_return_unlock(atomic_t *v)
|
||||
{
|
||||
int t;
|
||||
|
||||
__asm__ __volatile__(
|
||||
PPC_RELEASE_BARRIER
|
||||
"1: lwarx %0,0,%1 # mutex unlock\n\
|
||||
addic %0,%0,1\n"
|
||||
PPC405_ERR77(0,%1)
|
||||
" stwcx. %0,0,%1 \n\
|
||||
bne- 1b"
|
||||
: "=&r" (t)
|
||||
: "r" (&v->counter)
|
||||
: "cc", "memory");
|
||||
|
||||
return t;
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the original value was not 1
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1, and call <fail_fn> if
|
||||
* it wasn't 1 originally. This function MUST leave the value lower than
|
||||
* 1 even when the "1" assertion wasn't true.
|
||||
*/
|
||||
static inline void
|
||||
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
if (unlikely(__mutex_dec_return_lock(count) < 0))
|
||||
fail_fn(count);
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1. This function returns 0
|
||||
* if the fastpath succeeds, or -1 otherwise.
|
||||
*/
|
||||
static inline int
|
||||
__mutex_fastpath_lock_retval(atomic_t *count)
|
||||
{
|
||||
if (unlikely(__mutex_dec_return_lock(count) < 0))
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_unlock - try to promote the count from 0 to 1
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the original value was not 0
|
||||
*
|
||||
* Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
|
||||
* In the failure case, this function is allowed to either set the value to
|
||||
* 1, or to set it to a value lower than 1.
|
||||
*/
|
||||
static inline void
|
||||
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
if (unlikely(__mutex_inc_return_unlock(count) <= 0))
|
||||
fail_fn(count);
|
||||
}
|
||||
|
||||
#define __mutex_slowpath_needs_to_unlock() 1
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
|
||||
*
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: fallback function
|
||||
*
|
||||
* Change the count from 1 to 0, and return 1 (success), or if the count
|
||||
* was not 1, then return 0 (failure).
|
||||
*/
|
||||
static inline int
|
||||
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
|
||||
{
|
||||
if (likely(atomic_read(count) == 1 && __mutex_cmpxchg_lock(count, 1, 0) == 1))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
@@ -404,8 +404,6 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
|
||||
#define cpu_relax() barrier()
|
||||
#endif
|
||||
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Check that a certain kernel stack pointer is valid in task_struct p */
|
||||
int validate_sp(unsigned long sp, struct task_struct *p,
|
||||
unsigned long nbytes);
|
||||
|
@@ -52,6 +52,14 @@
#define SYNC_IO
#endif

#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
@@ -1,9 +0,0 @@
|
||||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
@@ -234,9 +234,10 @@ static inline unsigned short stap(void)
/*
 * Give up the time slice of the virtual PU.
 */
void cpu_relax(void);
#define cpu_relax_yield cpu_relax_yield
void cpu_relax_yield(void);

#define cpu_relax_lowlatency()  barrier()
#define cpu_relax() barrier()

#define ECAG_CACHE_ATTRIBUTE	0
#define ECAG_CPU_ATTRIBUTE	1
@@ -23,6 +23,14 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
	return __sync_bool_compare_and_swap(lock, old, new);
}

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
@@ -53,7 +53,7 @@ void s390_update_cpu_mhz(void)
	on_each_cpu(update_cpu_mhz, NULL, 0);
}

void notrace cpu_relax(void)
void notrace cpu_relax_yield(void)
{
	if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) {
		diag_stat_inc(DIAG_STAT_X044);
@@ -61,7 +61,7 @@ void notrace cpu_relax(void)
	}
	barrier();
}
EXPORT_SYMBOL(cpu_relax);
EXPORT_SYMBOL(cpu_relax_yield);

/*
 * cpu_init - initializes state that is per-CPU.
@@ -368,10 +368,15 @@ int smp_find_processor_id(u16 address)
	return -1;
}

int smp_vcpu_scheduled(int cpu)
bool arch_vcpu_is_preempted(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

void smp_yield_cpu(int cpu)
{
@@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
|
||||
asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
|
||||
}
|
||||
|
||||
static inline int cpu_is_preempted(int cpu)
|
||||
{
|
||||
if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
|
||||
return 0;
|
||||
if (smp_vcpu_scheduled(cpu))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
void arch_spin_lock_wait(arch_spinlock_t *lp)
|
||||
{
|
||||
unsigned int cpu = SPINLOCK_LOCKVAL;
|
||||
@@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
|
||||
continue;
|
||||
}
|
||||
/* First iteration: check if the lock owner is running. */
|
||||
if (first_diag && cpu_is_preempted(~owner)) {
|
||||
if (first_diag && arch_vcpu_is_preempted(~owner)) {
|
||||
smp_yield_cpu(~owner);
|
||||
first_diag = 0;
|
||||
continue;
|
||||
@@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
|
||||
* yield the CPU unconditionally. For LPAR rely on the
|
||||
* sense running status.
|
||||
*/
|
||||
if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
|
||||
if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
|
||||
smp_yield_cpu(~owner);
|
||||
first_diag = 0;
|
||||
}
|
||||
@@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
|
||||
continue;
|
||||
}
|
||||
/* Check if the lock owner is running. */
|
||||
if (first_diag && cpu_is_preempted(~owner)) {
|
||||
if (first_diag && arch_vcpu_is_preempted(~owner)) {
|
||||
smp_yield_cpu(~owner);
|
||||
first_diag = 0;
|
||||
continue;
|
||||
@@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
|
||||
* yield the CPU unconditionally. For LPAR rely on the
|
||||
* sense running status.
|
||||
*/
|
||||
if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
|
||||
if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
|
||||
smp_yield_cpu(~owner);
|
||||
first_diag = 0;
|
||||
}
|
||||
@@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
|
||||
owner = 0;
|
||||
while (1) {
|
||||
if (count-- <= 0) {
|
||||
if (owner && cpu_is_preempted(~owner))
|
||||
if (owner && arch_vcpu_is_preempted(~owner))
|
||||
smp_yield_cpu(~owner);
|
||||
count = spin_retry;
|
||||
}
|
||||
@@ -211,7 +202,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
|
||||
owner = 0;
|
||||
while (1) {
|
||||
if (count-- <= 0) {
|
||||
if (owner && cpu_is_preempted(~owner))
|
||||
if (owner && arch_vcpu_is_preempted(~owner))
|
||||
smp_yield_cpu(~owner);
|
||||
count = spin_retry;
|
||||
}
|
||||
@@ -241,7 +232,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
|
||||
owner = 0;
|
||||
while (1) {
|
||||
if (count-- <= 0) {
|
||||
if (owner && cpu_is_preempted(~owner))
|
||||
if (owner && arch_vcpu_is_preempted(~owner))
|
||||
smp_yield_cpu(~owner);
|
||||
count = spin_retry;
|
||||
}
|
||||
@@ -285,7 +276,7 @@ void arch_lock_relax(unsigned int cpu)
|
||||
{
|
||||
if (!cpu)
|
||||
return;
|
||||
if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
|
||||
if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
|
||||
return;
|
||||
smp_yield_cpu(~cpu);
|
||||
}
|
||||
|
@@ -1,6 +0,0 @@
|
||||
#ifndef _ASM_SCORE_MUTEX_H
|
||||
#define _ASM_SCORE_MUTEX_H
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
||||
|
||||
#endif /* _ASM_SCORE_MUTEX_H */
|
@@ -24,7 +24,6 @@ extern unsigned long get_wchan(struct task_struct *p);
|
||||
#define current_text_addr() ({ __label__ _l; _l: &&_l; })
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
#define release_thread(thread) do {} while (0)
|
||||
|
||||
/*
|
||||
|
@@ -1,109 +0,0 @@
|
||||
/*
|
||||
* arch/sh/include/asm/mutex-llsc.h
|
||||
*
|
||||
* SH-4A optimized mutex locking primitives
|
||||
*
|
||||
* Please look into asm-generic/mutex-xchg.h for a formal definition.
|
||||
*/
|
||||
#ifndef __ASM_SH_MUTEX_LLSC_H
|
||||
#define __ASM_SH_MUTEX_LLSC_H
|
||||
|
||||
/*
|
||||
* Attempting to lock a mutex on SH4A is done like in ARMv6+ architecure.
|
||||
* with a bastardized atomic decrement (it is not a reliable atomic decrement
|
||||
* but it satisfies the defined semantics for our purpose, while being
|
||||
* smaller and faster than a real atomic decrement or atomic swap.
|
||||
* The idea is to attempt decrementing the lock value only once. If once
|
||||
* decremented it isn't zero, or if its store-back fails due to a dispute
|
||||
* on the exclusive store, we simply bail out immediately through the slow
|
||||
* path where the lock will be reattempted until it succeeds.
|
||||
*/
|
||||
static inline void
|
||||
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
int __done, __res;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"movli.l @%2, %0 \n"
|
||||
"add #-1, %0 \n"
|
||||
"movco.l %0, @%2 \n"
|
||||
"movt %1 \n"
|
||||
: "=&z" (__res), "=&r" (__done)
|
||||
: "r" (&(count)->counter)
|
||||
: "t");
|
||||
|
||||
if (unlikely(!__done || __res != 0))
|
||||
fail_fn(count);
|
||||
}
|
||||
|
||||
static inline int
|
||||
__mutex_fastpath_lock_retval(atomic_t *count)
|
||||
{
|
||||
int __done, __res;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"movli.l @%2, %0 \n"
|
||||
"add #-1, %0 \n"
|
||||
"movco.l %0, @%2 \n"
|
||||
"movt %1 \n"
|
||||
: "=&z" (__res), "=&r" (__done)
|
||||
: "r" (&(count)->counter)
|
||||
: "t");
|
||||
|
||||
if (unlikely(!__done || __res != 0))
|
||||
__res = -1;
|
||||
|
||||
return __res;
|
||||
}
|
||||
|
||||
static inline void
|
||||
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
int __done, __res;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"movli.l @%2, %0 \n\t"
|
||||
"add #1, %0 \n\t"
|
||||
"movco.l %0, @%2 \n\t"
|
||||
"movt %1 \n\t"
|
||||
: "=&z" (__res), "=&r" (__done)
|
||||
: "r" (&(count)->counter)
|
||||
: "t");
|
||||
|
||||
if (unlikely(!__done || __res <= 0))
|
||||
fail_fn(count);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the unlock was done on a contended lock, or if the unlock simply fails
|
||||
* then the mutex remains locked.
|
||||
*/
|
||||
#define __mutex_slowpath_needs_to_unlock() 1
|
||||
|
||||
/*
|
||||
* For __mutex_fastpath_trylock we do an atomic decrement and check the
|
||||
* result and put it in the __res variable.
|
||||
*/
|
||||
static inline int
|
||||
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
|
||||
{
|
||||
int __res, __orig;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"1: movli.l @%2, %0 \n\t"
|
||||
"dt %0 \n\t"
|
||||
"movco.l %0,@%2 \n\t"
|
||||
"bf 1b \n\t"
|
||||
"cmp/eq #0,%0 \n\t"
|
||||
"bt 2f \n\t"
|
||||
"mov #0, %1 \n\t"
|
||||
"bf 3f \n\t"
|
||||
"2: mov #1, %1 \n\t"
|
||||
"3: "
|
||||
: "=&z" (__orig), "=&r" (__res)
|
||||
: "r" (&count->counter)
|
||||
: "t");
|
||||
|
||||
return __res;
|
||||
}
|
||||
#endif /* __ASM_SH_MUTEX_LLSC_H */
|
@@ -1,12 +0,0 @@
|
||||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
#if defined(CONFIG_CPU_SH4A)
|
||||
#include <asm/mutex-llsc.h>
|
||||
#else
|
||||
#include <asm-generic/mutex-dec.h>
|
||||
#endif
|
@@ -97,7 +97,6 @@ extern struct sh_cpuinfo cpu_data[];
|
||||
|
||||
#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory")
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
void default_idle(void);
|
||||
void stop_this_cpu(void *);
|
||||
|
@@ -15,7 +15,6 @@ generic-y += local64.h
|
||||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += module.h
|
||||
generic-y += mutex.h
|
||||
generic-y += preempt.h
|
||||
generic-y += rwsem.h
|
||||
generic-y += serial.h
|
||||
|
@@ -119,7 +119,6 @@ extern struct task_struct *last_task_used_math;
|
||||
int do_mathemu(struct pt_regs *regs, struct task_struct *fpt);
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
extern void (*sparc_idle)(void);
|
||||
|
||||
|
@@ -216,7 +216,6 @@ unsigned long get_wchan(struct task_struct *task);
|
||||
"nop\n\t" \
|
||||
".previous" \
|
||||
::: "memory")
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Prefetch support. This is tuned for UltraSPARC-III and later.
|
||||
* UltraSPARC-I will treat these as nops, and UltraSPARC-II has
|
||||
|
@@ -21,7 +21,6 @@ generic-y += local64.h
|
||||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += msgbuf.h
|
||||
generic-y += mutex.h
|
||||
generic-y += param.h
|
||||
generic-y += parport.h
|
||||
generic-y += poll.h
|
||||
|
@@ -264,8 +264,6 @@ static inline void cpu_relax(void)
|
||||
barrier();
|
||||
}
|
||||
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Info on this processor (see fs/proc/cpuinfo.c) */
|
||||
struct seq_operations;
|
||||
extern const struct seq_operations cpuinfo_op;
|
||||
|
@@ -17,7 +17,6 @@ generic-y += irq_work.h
|
||||
generic-y += kdebug.h
|
||||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += mutex.h
|
||||
generic-y += param.h
|
||||
generic-y += pci.h
|
||||
generic-y += percpu.h
|
||||
|
@@ -1,20 +0,0 @@
|
||||
/*
|
||||
* linux/arch/unicore32/include/asm/mutex.h
|
||||
*
|
||||
* Code specific to PKUnity SoC and UniCore ISA
|
||||
*
|
||||
* Copyright (C) 2001-2010 GUAN Xue-tao
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* UniCore optimized mutex locking primitives
|
||||
*
|
||||
* Please look into asm-generic/mutex-xchg.h for a formal definition.
|
||||
*/
|
||||
#ifndef __UNICORE_MUTEX_H__
|
||||
#define __UNICORE_MUTEX_H__
|
||||
|
||||
# include <asm-generic/mutex-xchg.h>
|
||||
#endif
|
@@ -71,7 +71,6 @@ extern void release_thread(struct task_struct *);
|
||||
unsigned long get_wchan(struct task_struct *p);
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#define task_pt_regs(p) \
|
||||
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
|
||||
|
@@ -1,5 +0,0 @@
|
||||
#ifdef CONFIG_X86_32
|
||||
# include <asm/mutex_32.h>
|
||||
#else
|
||||
# include <asm/mutex_64.h>
|
||||
#endif
|
@@ -1,110 +0,0 @@
|
||||
/*
|
||||
* Assembly implementation of the mutex fastpath, based on atomic
|
||||
* decrement/increment.
|
||||
*
|
||||
* started by Ingo Molnar:
|
||||
*
|
||||
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
|
||||
*/
|
||||
#ifndef _ASM_X86_MUTEX_32_H
|
||||
#define _ASM_X86_MUTEX_32_H
|
||||
|
||||
#include <asm/alternative.h>
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
* @fn: function to call if the original value was not 1
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1, and call <fn> if it
|
||||
* wasn't 1 originally. This function MUST leave the value lower than 1
|
||||
* even when the "1" assertion wasn't true.
|
||||
*/
|
||||
#define __mutex_fastpath_lock(count, fail_fn) \
|
||||
do { \
|
||||
unsigned int dummy; \
|
||||
\
|
||||
typecheck(atomic_t *, count); \
|
||||
typecheck_fn(void (*)(atomic_t *), fail_fn); \
|
||||
\
|
||||
asm volatile(LOCK_PREFIX " decl (%%eax)\n" \
|
||||
" jns 1f \n" \
|
||||
" call " #fail_fn "\n" \
|
||||
"1:\n" \
|
||||
: "=a" (dummy) \
|
||||
: "a" (count) \
|
||||
: "memory", "ecx", "edx"); \
|
||||
} while (0)
|
||||
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1. This function returns 0
|
||||
* if the fastpath succeeds, or -1 otherwise.
|
||||
*/
|
||||
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
|
||||
{
|
||||
if (unlikely(atomic_dec_return(count) < 0))
|
||||
return -1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the original value was not 0
|
||||
*
|
||||
* try to promote the mutex from 0 to 1. if it wasn't 0, call <fail_fn>.
|
||||
* In the failure case, this function is allowed to either set the value
|
||||
* to 1, or to set it to a value lower than 1.
|
||||
*
|
||||
* If the implementation sets it to a value of lower than 1, the
|
||||
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
|
||||
* to return 0 otherwise.
|
||||
*/
|
||||
#define __mutex_fastpath_unlock(count, fail_fn) \
|
||||
do { \
|
||||
unsigned int dummy; \
|
||||
\
|
||||
typecheck(atomic_t *, count); \
|
||||
typecheck_fn(void (*)(atomic_t *), fail_fn); \
|
||||
\
|
||||
asm volatile(LOCK_PREFIX " incl (%%eax)\n" \
|
||||
" jg 1f\n" \
|
||||
" call " #fail_fn "\n" \
|
||||
"1:\n" \
|
||||
: "=a" (dummy) \
|
||||
: "a" (count) \
|
||||
: "memory", "ecx", "edx"); \
|
||||
} while (0)
|
||||
|
||||
#define __mutex_slowpath_needs_to_unlock() 1
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
|
||||
*
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: fallback function
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1, and return 0 (failure)
|
||||
* if it wasn't 1 originally, or return 1 (success) otherwise. This function
|
||||
* MUST leave the value lower than 1 even when the "1" assertion wasn't true.
|
||||
* Additionally, if the value was < 0 originally, this function must not leave
|
||||
* it to 0 on failure.
|
||||
*/
|
||||
static inline int __mutex_fastpath_trylock(atomic_t *count,
|
||||
int (*fail_fn)(atomic_t *))
|
||||
{
|
||||
/* cmpxchg because it never induces a false contention state. */
|
||||
if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* _ASM_X86_MUTEX_32_H */
|
@@ -1,127 +0,0 @@
|
||||
/*
|
||||
* Assembly implementation of the mutex fastpath, based on atomic
|
||||
* decrement/increment.
|
||||
*
|
||||
* started by Ingo Molnar:
|
||||
*
|
||||
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
|
||||
*/
|
||||
#ifndef _ASM_X86_MUTEX_64_H
|
||||
#define _ASM_X86_MUTEX_64_H
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock - decrement and call function if negative
|
||||
* @v: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the result is negative
|
||||
*
|
||||
* Atomically decrements @v and calls <fail_fn> if the result is negative.
|
||||
*/
|
||||
#ifdef CC_HAVE_ASM_GOTO
|
||||
static inline void __mutex_fastpath_lock(atomic_t *v,
|
||||
void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
asm_volatile_goto(LOCK_PREFIX " decl %0\n"
|
||||
" jns %l[exit]\n"
|
||||
: : "m" (v->counter)
|
||||
: "memory", "cc"
|
||||
: exit);
|
||||
fail_fn(v);
|
||||
exit:
|
||||
return;
|
||||
}
|
||||
#else
|
||||
#define __mutex_fastpath_lock(v, fail_fn) \
|
||||
do { \
|
||||
unsigned long dummy; \
|
||||
\
|
||||
typecheck(atomic_t *, v); \
|
||||
typecheck_fn(void (*)(atomic_t *), fail_fn); \
|
||||
\
|
||||
asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \
|
||||
" jns 1f \n" \
|
||||
" call " #fail_fn "\n" \
|
||||
"1:" \
|
||||
: "=D" (dummy) \
|
||||
: "D" (v) \
|
||||
: "rax", "rsi", "rdx", "rcx", \
|
||||
"r8", "r9", "r10", "r11", "memory"); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
|
||||
* from 1 to a 0 value
|
||||
* @count: pointer of type atomic_t
|
||||
*
|
||||
* Change the count from 1 to a value lower than 1. This function returns 0
|
||||
* if the fastpath succeeds, or -1 otherwise.
|
||||
*/
|
||||
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
|
||||
{
|
||||
if (unlikely(atomic_dec_return(count) < 0))
|
||||
return -1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_unlock - increment and call function if nonpositive
|
||||
* @v: pointer of type atomic_t
|
||||
* @fail_fn: function to call if the result is nonpositive
|
||||
*
|
||||
* Atomically increments @v and calls <fail_fn> if the result is nonpositive.
|
||||
*/
|
||||
#ifdef CC_HAVE_ASM_GOTO
|
||||
static inline void __mutex_fastpath_unlock(atomic_t *v,
|
||||
void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
asm_volatile_goto(LOCK_PREFIX " incl %0\n"
|
||||
" jg %l[exit]\n"
|
||||
: : "m" (v->counter)
|
||||
: "memory", "cc"
|
||||
: exit);
|
||||
fail_fn(v);
|
||||
exit:
|
||||
return;
|
||||
}
|
||||
#else
|
||||
#define __mutex_fastpath_unlock(v, fail_fn) \
|
||||
do { \
|
||||
unsigned long dummy; \
|
||||
\
|
||||
typecheck(atomic_t *, v); \
|
||||
typecheck_fn(void (*)(atomic_t *), fail_fn); \
|
||||
\
|
||||
asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \
|
||||
" jg 1f\n" \
|
||||
" call " #fail_fn "\n" \
|
||||
"1:" \
|
||||
: "=D" (dummy) \
|
||||
: "D" (v) \
|
||||
: "rax", "rsi", "rdx", "rcx", \
|
||||
"r8", "r9", "r10", "r11", "memory"); \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#define __mutex_slowpath_needs_to_unlock() 1
|
||||
|
||||
/**
|
||||
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
|
||||
*
|
||||
* @count: pointer of type atomic_t
|
||||
* @fail_fn: fallback function
|
||||
*
|
||||
* Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
|
||||
* if it wasn't 1 originally. [the fallback function is never used on
|
||||
* x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
|
||||
*/
|
||||
static inline int __mutex_fastpath_trylock(atomic_t *count,
|
||||
int (*fail_fn)(atomic_t *))
|
||||
{
|
||||
if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* _ASM_X86_MUTEX_64_H */
|
@@ -678,6 +678,11 @@ static __always_inline void pv_kick(int cpu)
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(int cpu)
{
	return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
@@ -310,6 +310,8 @@ struct pv_lock_ops {

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	struct paravirt_callee_save vcpu_is_preempted;
};

/* This contains all the paravirt structures: we get a convenient
@@ -508,6 +510,18 @@ int paravirt_disable_iospace(void);
#define PVOP_TEST_NULL(op)	((void)op)
#endif

#define PVOP_RETMASK(rettype) \
	({	unsigned long __mask = ~0UL; \
		switch (sizeof(rettype)) { \
		case 1: __mask = 0xffUL; break; \
		case 2: __mask = 0xffffUL; break; \
		case 4: __mask = 0xffffffffUL; break; \
		default: break; \
		} \
		__mask; \
	})


#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
		      pre, post, ...) \
	({ \
@@ -535,7 +549,7 @@ int paravirt_disable_iospace(void);
		       paravirt_clobber(clbr), \
		       ##__VA_ARGS__ \
		     : "memory", "cc" extra_clbr); \
		__ret = (rettype)__eax;
		__ret = (rettype)(__eax & PVOP_RETMASK(rettype)); \
	} \
	__ret; \
})
@@ -588,8 +588,6 @@ static __always_inline void cpu_relax(void)
|
||||
rep_nop();
|
||||
}
|
||||
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Stop speculative execution and prefetching of modified code. */
|
||||
static inline void sync_core(void)
|
||||
{
|
||||
|
@@ -32,6 +32,12 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
|
||||
{
|
||||
pv_queued_spin_unlock(lock);
|
||||
}
|
||||
|
||||
#define vcpu_is_preempted vcpu_is_preempted
|
||||
static inline bool vcpu_is_preempted(int cpu)
|
||||
{
|
||||
return pv_vcpu_is_preempted(cpu);
|
||||
}
|
||||
#else
|
||||
static inline void queued_spin_unlock(struct qspinlock *lock)
|
||||
{
|
||||
|
@@ -45,7 +45,9 @@ struct kvm_steal_time {
	__u64 steal;
	__u32 version;
	__u32 flags;
	__u32 pad[12];
	__u8  preempted;
	__u8  u8_pad[3];
	__u32 pad[11];
};

#define KVM_STEAL_ALIGNMENT_BITS 5
@@ -592,6 +592,14 @@ out:
	local_irq_restore(flags);
}

__visible bool __kvm_vcpu_is_preempted(int cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!src->preempted;
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
@@ -608,6 +616,11 @@ void __init kvm_spinlock_init(void)
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_lock_ops.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

static __init int kvm_spinlock_init_jump(void)
@@ -12,7 +12,6 @@ __visible void __native_queued_spin_unlock(struct qspinlock *lock)
|
||||
{
|
||||
native_queued_spin_unlock(lock);
|
||||
}
|
||||
|
||||
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
|
||||
|
||||
bool pv_is_native_spin_unlock(void)
|
||||
@@ -21,12 +20,25 @@ bool pv_is_native_spin_unlock(void)
|
||||
__raw_callee_save___native_queued_spin_unlock;
|
||||
}
|
||||
|
||||
__visible bool __native_vcpu_is_preempted(int cpu)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
|
||||
|
||||
bool pv_is_native_vcpu_is_preempted(void)
|
||||
{
|
||||
return pv_lock_ops.vcpu_is_preempted.func ==
|
||||
__raw_callee_save___native_vcpu_is_preempted;
|
||||
}
|
||||
|
||||
struct pv_lock_ops pv_lock_ops = {
|
||||
#ifdef CONFIG_SMP
|
||||
.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
|
||||
.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
|
||||
.wait = paravirt_nop,
|
||||
.kick = paravirt_nop,
|
||||
.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
|
||||
#endif /* SMP */
|
||||
};
|
||||
EXPORT_SYMBOL(pv_lock_ops);
|
||||
|
@@ -12,6 +12,7 @@ DEF_NATIVE(pv_cpu_ops, clts, "clts");
|
||||
|
||||
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
|
||||
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
|
||||
DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
|
||||
#endif
|
||||
|
||||
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
|
||||
@@ -27,6 +28,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
|
||||
}
|
||||
|
||||
extern bool pv_is_native_spin_unlock(void);
|
||||
extern bool pv_is_native_vcpu_is_preempted(void);
|
||||
|
||||
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
|
||||
unsigned long addr, unsigned len)
|
||||
@@ -56,9 +58,19 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
|
||||
end = end_pv_lock_ops_queued_spin_unlock;
|
||||
goto patch_site;
|
||||
}
|
||||
goto patch_default;
|
||||
|
||||
case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
|
||||
if (pv_is_native_vcpu_is_preempted()) {
|
||||
start = start_pv_lock_ops_vcpu_is_preempted;
|
||||
end = end_pv_lock_ops_vcpu_is_preempted;
|
||||
goto patch_site;
|
||||
}
|
||||
goto patch_default;
|
||||
#endif
|
||||
|
||||
default:
|
||||
patch_default:
|
||||
ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
|
||||
break;
|
||||
|
||||
|
@@ -21,6 +21,7 @@ DEF_NATIVE(, mov64, "mov %rdi, %rax");
|
||||
|
||||
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
|
||||
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
|
||||
DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
|
||||
#endif
|
||||
|
||||
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
|
||||
@@ -36,6 +37,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
|
||||
}
|
||||
|
||||
extern bool pv_is_native_spin_unlock(void);
|
||||
extern bool pv_is_native_vcpu_is_preempted(void);
|
||||
|
||||
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
|
||||
unsigned long addr, unsigned len)
|
||||
@@ -68,9 +70,19 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
|
||||
end = end_pv_lock_ops_queued_spin_unlock;
|
||||
goto patch_site;
|
||||
}
|
||||
goto patch_default;
|
||||
|
||||
case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
|
||||
if (pv_is_native_vcpu_is_preempted()) {
|
||||
start = start_pv_lock_ops_vcpu_is_preempted;
|
||||
end = end_pv_lock_ops_vcpu_is_preempted;
|
||||
goto patch_site;
|
||||
}
|
||||
goto patch_default;
|
||||
#endif
|
||||
|
||||
default:
|
||||
patch_default:
|
||||
ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
|
||||
break;
|
||||
|
||||
|
@@ -2071,6 +2071,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
		return;

	vcpu->arch.st.steal.preempted = 0;

	if (vcpu->arch.st.steal.version & 1)
		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */

@@ -2826,8 +2828,22 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
}

static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

	vcpu->arch.st.steal.preempted = 1;

	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
			&vcpu->arch.st.steal.preempted,
			offsetof(struct kvm_steal_time, preempted),
			sizeof(vcpu->arch.st.steal.preempted));
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_steal_time_set_preempted(vcpu);
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
	vcpu->arch.last_host_tsc = rdtsc();
@@ -26,7 +26,6 @@ static inline void rep_nop(void)
|
||||
}
|
||||
|
||||
#define cpu_relax() rep_nop()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
#define task_pt_regs(t) (&(t)->thread.regs)
|
||||
|
||||
|
@@ -114,6 +114,7 @@ void xen_uninit_lock_cpu(int cpu)
	per_cpu(irq_name, cpu) = NULL;
}

PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);

/*
 * Our init of PV spinlocks is split in two init functions due to us
@@ -137,6 +138,7 @@ void __init xen_init_spinlocks(void)
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = xen_qlock_wait;
	pv_lock_ops.kick = xen_qlock_kick;
	pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}

/*
@@ -1,9 +0,0 @@
|
||||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
@@ -206,7 +206,6 @@ extern unsigned long get_wchan(struct task_struct *p);
|
||||
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
#define cpu_relax_lowlatency() cpu_relax()
|
||||
|
||||
/* Special register access. */
|
||||
|
||||
|