[PATCH] x86: GDT alignment fix

Make the GDT page-aligned and page-padded to support running inside a
hypervisor.  This prevents false sharing of the GDT page with other hot
data, which Xen does not allow and which causes performance problems in
VMware.
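
As an illustration of what "page aligned and page padded" buys, here is a
minimal C sketch of the layout (not the literal patch code - the boot GDT
actually lives in assembly in head.S, and the gdt_page/boot_gdt_page names
below are made up for this example):

#include <asm/page.h>
#include <asm/desc.h>

/*
 * Padding the GDT out to a full, page-aligned page means no other hot
 * kernel data can land in the same page, so a hypervisor that shadows
 * or write-protects the GDT page never traps unrelated writes.
 */
union gdt_page {
	struct desc_struct gdt[GDT_ENTRIES];	/* the descriptors themselves */
	char pad[PAGE_SIZE];			/* pad out the rest of the page */
} boot_gdt_page __attribute__((aligned(PAGE_SIZE)));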

Rather than go back to the old method of statically allocating the GDT
(which wastes unneeded space for non-present CPUs), the GDT for APs is
allocated dynamically.
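
A rough sketch of the dynamic scheme (again not the literal patch code - the
allocation happens in the AP bring-up path, and alloc_ap_gdt() here is a
hypothetical helper; cpu_gdt_table and cpu_gdt_descr are the symbols visible
in the diff below):

#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/desc.h>

/* Hypothetical helper: give one AP its own page-backed GDT. */
static int alloc_ap_gdt(unsigned int cpu)
{
	struct desc_struct *gdt;

	/* A whole page per GDT keeps it page-aligned and page-padded. */
	gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
	if (!gdt)
		return -ENOMEM;

	/* Start from a copy of the boot CPU's GDT... */
	memcpy(gdt, cpu_gdt_table, GDT_ENTRIES * sizeof(struct desc_struct));

	/* ...and point this CPU's descriptor at it. */
	cpu_gdt_descr[cpu].size = GDT_ENTRIES * sizeof(struct desc_struct) - 1;
	cpu_gdt_descr[cpu].address = (unsigned long)gdt;
	return 0;
}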

Signed-off-by: Zachary Amsden <zach@vmware.com>
Cc: "Seth, Rohit" <rohit.seth@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author:     Zachary Amsden
Date:       2006-01-06 00:11:47 -08:00
Committer:  Linus Torvalds
Parent:     599a6e8ca4
Commit:     7c4cb60e5b
7 changed files with 29 additions and 17 deletions

@@ -15,9 +15,6 @@
 #include <asm/mmu.h>
 
 extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
-
-#define get_cpu_gdt_table(_cpu) (per_cpu(cpu_gdt_table,_cpu))
 
 DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
 
@@ -29,6 +26,11 @@ struct Xgt_desc_struct {
 
 extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
 
+static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+{
+	return ((struct desc_struct *)cpu_gdt_descr[cpu].address);
+}
+
 #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
 #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
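
Callers stay source-compatible: get_cpu_gdt_table(cpu) is still the way to
reach a CPU's GDT entries, it just resolves through cpu_gdt_descr instead of
a per-CPU array.  A hypothetical caller (load_own_gdt() is not part of the
patch) might look like:

#include <asm/desc.h>

/* Hypothetical: an AP pointing its GDTR at its own dynamically allocated GDT. */
static void load_own_gdt(unsigned int cpu)
{
	/*
	 * cpu_gdt_descr[cpu] holds the size and address of this CPU's GDT;
	 * get_cpu_gdt_table(cpu) returns that same memory for code that
	 * needs to edit individual descriptor entries.
	 */
	__asm__ __volatile__("lgdt %0" : : "m" (cpu_gdt_descr[cpu]));
}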