x86,vdso: Use LSL unconditionally for vgetcpu
LSL is faster than RDTSCP and works everywhere; there's no need to
switch between them depending on CPU.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Andi Kleen <andi@firstfloor.org>
Link: http://lkml.kernel.org/r/72f73d5ec4514e02bba345b9759177ef03742efb.1414706021.git.luto@amacapital.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
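For context, the RDTSCP-based path that the vDSO used to prefer reads the
CPU/node value from the IA32_TSC_AUX MSR (in that era the kernel packed the
node number above the low 12 CPU bits, matching VGETCPU_CPU_MASK below). A
minimal sketch of that approach, as an illustration rather than the kernel's
exact code:

static inline unsigned int getcpu_rdtscp(void)
{
	unsigned int lo, hi, aux;

	/* RDTSCP returns the TSC in EDX:EAX and IA32_TSC_AUX in ECX. */
	asm volatile("rdtscp" : "=a" (lo), "=d" (hi), "=c" (aux));
	return aux;
}

This only works on CPUs that implement RDTSCP, which is why the old code had
to switch at runtime between it and the LSL-based GDT read added below.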
commit e76b027e64
parent a92f101bc9
committed by Thomas Gleixner
@@ -70,4 +70,23 @@ static inline void gtod_write_end(struct vsyscall_gtod_data *s)
 	++s->seq;
 }
 
+#ifdef CONFIG_X86_64
+
+#define VGETCPU_CPU_MASK 0xfff
+
+static inline unsigned int __getcpu(void)
+{
+	unsigned int p;
+
+	/*
+	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
+	 * works on all CPUs.
+	 */
+	asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+
+	return p;
+}
+
+#endif /* CONFIG_X86_64 */
+
 #endif /* _ASM_X86_VGTOD_H */
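As a usage illustration of the new helper: a caller splits __getcpu()'s
return value with VGETCPU_CPU_MASK, taking the CPU number from the low 12
bits and the node number from the bits above them. The same LSL trick also
works from plain user space, since the per-CPU GDT descriptor is visible at
CPL 3. A standalone sketch, assuming the per-CPU segment sits at GDT index
15 with RPL 3 (the value behind __PER_CPU_SEG); both constants here are
illustrative, not exported ABI names:

#include <stdio.h>

#define PER_CPU_SELECTOR	((15 * 8) | 3)	/* assumed __PER_CPU_SEG value */
#define VGETCPU_CPU_MASK	0xfff

int main(void)
{
	unsigned int p = 0;

	/*
	 * LSL loads the segment limit of the selector in %1 into %0.
	 * "+r" keeps the zero initializer if the selector is unusable,
	 * since LSL then leaves the destination unchanged.
	 */
	asm("lsl %1,%0" : "+r" (p) : "r" (PER_CPU_SELECTOR));

	printf("cpu %u, node %u\n", p & VGETCPU_CPU_MASK, p >> 12);
	return 0;
}

Built with gcc -O2, this prints the current CPU and node on an x86-64 kernel
that sets up the vgetcpu segment; elsewhere it falls back to printing cpu 0,
node 0 because the LSL load fails and p keeps its initializer.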