Merge branch 'linus' into tracing/core
Merge reason: Pick up kernel/softirq.c update for dependent fix.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -22,10 +22,6 @@
 */
#define flush_agp_cache() wbinvd()

/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)

/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
	((char *)__get_free_pages(GFP_KERNEL, (order)))

@@ -73,8 +73,6 @@ static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
#endif /* CONFIG_SMP */

const unsigned char *const *find_nop_table(void);

/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
	\
@@ -144,8 +142,6 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
#define __parainstructions_end NULL
#endif

extern void add_nops(void *insns, unsigned int len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
@@ -161,10 +157,7 @@ extern void add_nops(void *insns, unsigned int len);
 * Intel's errata.
 * On the local CPU you need to be protected again NMI or MCE handlers seeing an
 * inconsistent instruction while you patch.
 * The _early version expects the memory to already be RW.
 */

extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_early(void *addr, const void *opcode, size_t len);

#endif /* _ASM_X86_ALTERNATIVE_H */

@@ -25,6 +25,7 @@
#ifdef CONFIG_AMD_IOMMU
extern int amd_iommu_init(void);
extern int amd_iommu_init_dma_ops(void);
extern int amd_iommu_init_passthrough(void);
extern void amd_iommu_detect(void);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_flush_all_domains(void);

@@ -143,22 +143,29 @@
#define EVT_BUFFER_SIZE 8192 /* 512 entries */
#define EVT_LEN_MASK (0x9ULL << 56)

#define PAGE_MODE_NONE 0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
#define PAGE_MODE_3_LEVEL 0x03
#define PAGE_MODE_4_LEVEL 0x04
#define PAGE_MODE_5_LEVEL 0x05
#define PAGE_MODE_6_LEVEL 0x06

#define IOMMU_PDE_NL_0 0x000ULL
#define IOMMU_PDE_NL_1 0x200ULL
#define IOMMU_PDE_NL_2 0x400ULL
#define IOMMU_PDE_NL_3 0x600ULL
#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9))
#define PM_LEVEL_SIZE(x) (((x) < 6) ? \
	((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
	(0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \
	IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL)

#define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL)
#define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL)
#define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL)

#define IOMMU_MAP_SIZE_L1 (1ULL << 21)
#define IOMMU_MAP_SIZE_L2 (1ULL << 30)
#define IOMMU_MAP_SIZE_L3 (1ULL << 39)
#define PM_MAP_4k 0
#define PM_ADDR_MASK 0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \
	(~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr))

#define IOMMU_PTE_P (1ULL << 0)
#define IOMMU_PTE_TV (1ULL << 1)
@@ -167,11 +174,6 @@
#define IOMMU_PTE_IR (1ULL << 61)
#define IOMMU_PTE_IW (1ULL << 62)

#define IOMMU_L1_PDE(address) \
	((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define IOMMU_L2_PDE(address) \
	((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)

#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
@@ -194,11 +196,14 @@
#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops
				      domain for an IOMMU */
#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
					  translation */

extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \
	do { \
		if (amd_iommu_dump) \
			printk(KERN_INFO "AMD IOMMU: " format, ## arg); \
			printk(KERN_INFO "AMD-Vi: " format, ## arg); \
	} while(0);

/*
@@ -226,6 +231,7 @@ struct protection_domain {
	int mode; /* paging mode (0-6 levels) */
	u64 *pt_root; /* page table root pointer */
	unsigned long flags; /* flags to find out type of domain */
	bool updated; /* complete domain flush required */
	unsigned dev_cnt; /* devices assigned to this domain */
	void *priv; /* private data */
};
@@ -337,6 +343,9 @@ struct amd_iommu {
	/* if one, we need to send a completion wait command */
	bool need_sync;

	/* becomes true if a command buffer reset is running */
	bool reset_in_progress;

	/* default dma_ops domain for that IOMMU */
	struct dma_ops_domain *default_dom;
};
@@ -457,4 +466,7 @@ static inline void amd_iommu_stats_init(void) { }

#endif /* CONFIG_AMD_IOMMU_STATS */

/* some function prototypes */
extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
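The PM_LEVEL_* macros added above define a 9-bits-per-level radix walk starting at bit 12. As an illustrative sketch (not part of this commit), indexing an arbitrary I/O virtual address at each level looks like:

	/* PM_LEVEL_SHIFT(x) = 12 + 9*x: level 0 covers address bits 12..20,
	 * level 1 bits 21..29, and so on, 512 entries per table. */
	unsigned long long iova = 0x0000123456789000ULL;	/* arbitrary example */
	int lvl;

	for (lvl = 3; lvl >= 0; lvl--)
		pr_info("level %d index: %llx\n", lvl, PM_LEVEL_INDEX(lvl, iova));

pr_info() is just a convenient logging call here; the header itself only provides the macros.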
@@ -183,6 +183,10 @@ static inline int x2apic_enabled(void)
}

#define x2apic_supported() (cpu_has_x2apic)
static inline void x2apic_force_phys(void)
{
	x2apic_phys = 1;
}
#else
static inline void check_x2apic(void)
{
@@ -194,6 +198,9 @@ static inline int x2apic_enabled(void)
{
	return 0;
}
static inline void x2apic_force_phys(void)
{
}

#define x2apic_preenabled 0
#define x2apic_supported() 0

@@ -8,12 +8,14 @@
 * Ingo Molnar <mingo@redhat.com>, 1999, 2000
 */

#define APIC_DEFAULT_PHYS_BASE 0xfee00000
#define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000
#define APIC_DEFAULT_PHYS_BASE 0xfee00000

#define APIC_ID 0x20

#define APIC_LVR 0x30
#define APIC_LVR_MASK 0xFF00FF
#define APIC_LVR_DIRECTED_EOI (1 << 24)
#define GET_APIC_VERSION(x) ((x) & 0xFFu)
#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu)
#ifdef CONFIG_X86_32
@@ -40,6 +42,7 @@
#define APIC_DFR_CLUSTER 0x0FFFFFFFul
#define APIC_DFR_FLAT 0xFFFFFFFFul
#define APIC_SPIV 0xF0
#define APIC_SPIV_DIRECTED_EOI (1 << 12)
#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
#define APIC_SPIV_APIC_ENABLED (1 << 8)
#define APIC_ISR 0x100

@@ -3,7 +3,7 @@

#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
# define __ASM_EX_SEC .section __ex_table
# define __ASM_EX_SEC .section __ex_table, "a"
#else
# define __ASM_FORM(x) " " #x " "
# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
@@ -38,10 +38,18 @@
#define _ASM_DI __ASM_REG(di)

/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE(from,to) \
	__ASM_EX_SEC ; \
	_ASM_ALIGN ; \
	_ASM_PTR from , to ; \
	.previous
#else
# define _ASM_EXTABLE(from,to) \
	__ASM_EX_SEC \
	_ASM_ALIGN "\n" \
	_ASM_PTR #from "," #to "\n" \
	" .previous\n"
#endif

#endif /* _ASM_X86_ASM_H */

@@ -85,7 +85,8 @@ struct efi_info {
struct boot_params {
	struct screen_info screen_info; /* 0x000 */
	struct apm_bios_info apm_bios_info; /* 0x040 */
	__u8 _pad2[12]; /* 0x054 */
	__u8 _pad2[4]; /* 0x054 */
	__u64 tboot_addr; /* 0x058 */
	struct ist_info ist_info; /* 0x060 */
	__u8 _pad3[16]; /* 0x070 */
	__u8 hd0_info[16]; /* obsolete! */ /* 0x080 */

@@ -43,8 +43,58 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
	memcpy(dst, src, len);
}

#define PG_non_WB PG_arch_1
PAGEFLAG(NonWB, non_WB)
#define PG_WC PG_arch_1
PAGEFLAG(WC, WC)

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses page flags WC and Uncached together to keep track of
 * memory type of pages that have backing page struct. X86 PAT supports 3
 * different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
 * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
 * been changed from its default (value of -1 used to denote this).
 * Note we do not support _PAGE_CACHE_UC here.
 *
 * Caller must hold memtype_lock for atomicity.
 */
static inline unsigned long get_page_memtype(struct page *pg)
{
	if (!PageUncached(pg) && !PageWC(pg))
		return -1;
	else if (!PageUncached(pg) && PageWC(pg))
		return _PAGE_CACHE_WC;
	else if (PageUncached(pg) && !PageWC(pg))
		return _PAGE_CACHE_UC_MINUS;
	else
		return _PAGE_CACHE_WB;
}

static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
	switch (memtype) {
	case _PAGE_CACHE_WC:
		ClearPageUncached(pg);
		SetPageWC(pg);
		break;
	case _PAGE_CACHE_UC_MINUS:
		SetPageUncached(pg);
		ClearPageWC(pg);
		break;
	case _PAGE_CACHE_WB:
		SetPageUncached(pg);
		SetPageWC(pg);
		break;
	default:
	case -1:
		ClearPageUncached(pg);
		ClearPageWC(pg);
		break;
	}
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
#endif

/*
 * The set_memory_* API can be used to change various attributes of a virtual
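get_page_memtype()/set_page_memtype() above pack three memory types plus a "not set" state into two page flags. A minimal user-space model of that two-flag encoding (the names and return values below are stand-ins, not the kernel's _PAGE_CACHE_* constants):

	#include <stdio.h>

	/* Two booleans encode four states, mirroring PageUncached/PageWC. */
	static int memtype(int uncached, int wc)
	{
		if (!uncached && !wc)
			return -1;	/* default: memtype not set */
		if (!uncached && wc)
			return 1;	/* stand-in for WC */
		if (uncached && !wc)
			return 2;	/* stand-in for UC_MINUS */
		return 3;		/* stand-in for WB */
	}

	int main(void)
	{
		int u, w;

		for (u = 0; u < 2; u++)
			for (w = 0; w < 2; w++)
				printf("uncached=%d wc=%d -> %d\n", u, w, memtype(u, w));
		return 0;
	}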
@@ -95,6 +95,7 @@
#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */
#define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */
#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */

/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */

@@ -11,7 +11,7 @@ DECLARE_PER_CPU(struct task_struct *, current_task);

static __always_inline struct task_struct *get_current(void)
{
	return percpu_read(current_task);
	return percpu_read_stable(current_task);
}

#define current get_current()

@@ -291,11 +291,24 @@ static inline unsigned long get_desc_base(const struct desc_struct *desc)
	return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24);
}

static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
{
	desc->base0 = base & 0xffff;
	desc->base1 = (base >> 16) & 0xff;
	desc->base2 = (base >> 24) & 0xff;
}

static inline unsigned long get_desc_limit(const struct desc_struct *desc)
{
	return desc->limit0 | (desc->limit << 16);
}

static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
{
	desc->limit0 = limit & 0xffff;
	desc->limit = (limit >> 16) & 0xf;
}

static inline void _set_gate(int gate, unsigned type, void *addr,
			     unsigned dpl, unsigned ist, unsigned seg)
{

@@ -34,6 +34,12 @@ struct desc_struct {
	};
} __attribute__((packed));

#define GDT_ENTRY_INIT(flags, base, limit) { { { \
	.a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \
	.b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \
		((limit) & 0xf0000) | ((base) & 0xff000000), \
	} } }

enum {
	GATE_INTERRUPT = 0xE,
	GATE_TRAP = 0xF,
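The new GDT_ENTRY_INIT() packs flags/base/limit into the two legacy descriptor words. A quick user-space check of the expansion (the struct below is a reduced stand-in for desc_struct, not the kernel type):

	#include <stdio.h>

	struct desc_words { unsigned int a, b; };	/* stand-in for desc_struct */

	#define GDT_WORDS(flags, base, limit) { \
		.a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \
		.b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \
			((limit) & 0xf0000) | ((base) & 0xff000000), \
	}

	int main(void)
	{
		/* Flat 4G exec segment: matches the lguest hunk further down,
		 * where GDT_ENTRY_INIT(0xc09b, 0, 0xfffff) replaces the old
		 * literal {0x0000ffff, 0x00cf9b00}. */
		struct desc_words d = GDT_WORDS(0xc09b, 0, 0xfffff);

		printf("a=0x%08x b=0x%08x\n", d.a, d.b);
		return 0;
	}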
@@ -13,4 +13,7 @@ struct dma_map_ops *dma_ops;
#endif
};

struct pdev_archdata {
};

#endif /* _ASM_X86_DEVICE_H */

@@ -55,6 +55,24 @@ extern int dma_set_mask(struct device *dev, u64 mask);
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	return addr + size <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
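dma_capable() above is a pure range check against the device's DMA mask, and phys_to_dma()/dma_to_phys() are identity conversions on bare-metal x86. A hedged sketch of how a mapping path might combine them (the surrounding names, including the error value, are hypothetical):

	/* Bus address equals physical address on this platform. */
	dma_addr_t bus = phys_to_dma(dev, paddr);

	/* Reject a range that overflows the device's DMA mask,
	 * mirroring the addr + size <= *dev->dma_mask test above. */
	if (!dma_capable(dev, bus, size))
		return bad_dma_address;	/* hypothetical error path */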
@@ -87,9 +87,25 @@
	CFI_RESTORE \reg
	.endm
#else /*!CONFIG_X86_64*/
	.macro pushl_cfi reg
	pushl \reg
	CFI_ADJUST_CFA_OFFSET 4
	.endm

	/* 32bit defenitions are missed yet */
	.macro popl_cfi reg
	popl \reg
	CFI_ADJUST_CFA_OFFSET -4
	.endm

	.macro movl_cfi reg offset=0
	movl %\reg, \offset(%esp)
	CFI_REL_OFFSET \reg, \offset
	.endm

	.macro movl_cfi_restore offset reg
	movl \offset(%esp), %\reg
	CFI_RESTORE \reg
	.endm
#endif /*!CONFIG_X86_64*/
#endif /*__ASSEMBLY__*/

@@ -131,6 +131,9 @@ enum fixed_addresses {
#endif
#ifdef CONFIG_X86_32
	FIX_WP_TEST,
#endif
#ifdef CONFIG_INTEL_TXT
	FIX_TBOOT_BASE,
#endif
	__end_of_fixed_addresses
};

@@ -26,6 +26,7 @@ extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
extern void __math_state_restore(void);
extern void init_thread_xstate(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);

@@ -301,6 +302,14 @@ static inline void kernel_fpu_end(void)
	preempt_enable();
}

static inline bool irq_fpu_usable(void)
{
	struct pt_regs *regs;

	return !in_interrupt() || !(regs = get_irq_regs()) || \
		user_mode(regs) || (read_cr0() & X86_CR0_TS);
}

/*
 * Some instructions like VIA's padlock instructions generate a spurious
 * DNA fault but don't modify SSE registers. And these instructions

@@ -150,11 +150,10 @@ extern int timer_through_8259;
#define io_apic_assign_pci_irqs \
	(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)

#ifdef CONFIG_ACPI
extern u8 io_apic_unique_id(u8 id);
extern int io_apic_get_unique_id(int ioapic, int apic_id);
extern int io_apic_get_version(int ioapic);
extern int io_apic_get_redir_entries(int ioapic);
#endif /* CONFIG_ACPI */

struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
@@ -177,6 +176,16 @@ extern int setup_ioapic_entry(int apic, int irq,
			      int polarity, int vector, int pin);
extern void ioapic_write_entry(int apic, int pin,
			       struct IO_APIC_route_entry e);

struct mp_ioapic_gsi{
	int gsi_base;
	int gsi_end;
};
extern struct mp_ioapic_gsi mp_gsi_routing[];
int mp_find_ioapic(int gsi);
int mp_find_ioapic_pin(int ioapic, int gsi);
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);

#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
static const int timer_through_8259 = 0;

@@ -1,94 +1 @@
#ifndef _ASM_X86_IOCTLS_H
#define _ASM_X86_IOCTLS_H

#include <asm/ioctl.h>

/* 0x54 is just a magic number to make these relatively unique ('T') */

#define TCGETS 0x5401
#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
#define TCSETSW 0x5403
#define TCSETSF 0x5404
#define TCGETA 0x5405
#define TCSETA 0x5406
#define TCSETAW 0x5407
#define TCSETAF 0x5408
#define TCSBRK 0x5409
#define TCXONC 0x540A
#define TCFLSH 0x540B
#define TIOCEXCL 0x540C
#define TIOCNXCL 0x540D
#define TIOCSCTTY 0x540E
#define TIOCGPGRP 0x540F
#define TIOCSPGRP 0x5410
#define TIOCOUTQ 0x5411
#define TIOCSTI 0x5412
#define TIOCGWINSZ 0x5413
#define TIOCSWINSZ 0x5414
#define TIOCMGET 0x5415
#define TIOCMBIS 0x5416
#define TIOCMBIC 0x5417
#define TIOCMSET 0x5418
#define TIOCGSOFTCAR 0x5419
#define TIOCSSOFTCAR 0x541A
#define FIONREAD 0x541B
#define TIOCINQ FIONREAD
#define TIOCLINUX 0x541C
#define TIOCCONS 0x541D
#define TIOCGSERIAL 0x541E
#define TIOCSSERIAL 0x541F
#define TIOCPKT 0x5420
#define FIONBIO 0x5421
#define TIOCNOTTY 0x5422
#define TIOCSETD 0x5423
#define TIOCGETD 0x5424
#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
#define TCGETS2 _IOR('T', 0x2A, struct termios2)
#define TCSETS2 _IOW('T', 0x2B, struct termios2)
#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
#define TIOCGRS485 0x542E
#define TIOCSRS485 0x542F
#define TIOCGPTN _IOR('T', 0x30, unsigned int)
/* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
#define TCGETX 0x5432 /* SYS5 TCGETX compatibility */
#define TCSETX 0x5433
#define TCSETXF 0x5434
#define TCSETXW 0x5435

#define FIONCLEX 0x5450
#define FIOCLEX 0x5451
#define FIOASYNC 0x5452
#define TIOCSERCONFIG 0x5453
#define TIOCSERGWILD 0x5454
#define TIOCSERSWILD 0x5455
#define TIOCGLCKTRMIOS 0x5456
#define TIOCSLCKTRMIOS 0x5457
#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
#define TIOCSERGETLSR 0x5459 /* Get line status register */
#define TIOCSERGETMULTI 0x545A /* Get multiport config */
#define TIOCSERSETMULTI 0x545B /* Set multiport config */

#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
#define FIOQSIZE 0x5460

/* Used for packet mode */
#define TIOCPKT_DATA 0
#define TIOCPKT_FLUSHREAD 1
#define TIOCPKT_FLUSHWRITE 2
#define TIOCPKT_STOP 4
#define TIOCPKT_START 8
#define TIOCPKT_NOSTOP 16
#define TIOCPKT_DOSTOP 32

#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */

#endif /* _ASM_X86_IOCTLS_H */
#include <asm-generic/ioctls.h>

@@ -26,13 +26,16 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

int
is_io_mapping_possible(resource_size_t base, unsigned long size);

void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);

void
iounmap_atomic(void *kvaddr, enum km_type type);

int
iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);

void
iomap_free(resource_size_t base, unsigned long size);

#endif /* _ASM_X86_IOMAP_H */

@@ -1,28 +1 @@
#ifndef _ASM_X86_IPCBUF_H
#define _ASM_X86_IPCBUF_H

/*
 * The ipc64_perm structure for x86 architecture.
 * Note extra padding because this structure is passed back and forth
 * between kernel and user space.
 *
 * Pad space is left for:
 * - 32-bit mode_t and seq
 * - 2 miscellaneous 32-bit values
 */

struct ipc64_perm {
	__kernel_key_t key;
	__kernel_uid32_t uid;
	__kernel_gid32_t gid;
	__kernel_uid32_t cuid;
	__kernel_gid32_t cgid;
	__kernel_mode_t mode;
	unsigned short __pad1;
	unsigned short seq;
	unsigned short __pad2;
	unsigned long __unused1;
	unsigned long __unused2;
};

#endif /* _ASM_X86_IPCBUF_H */
#include <asm-generic/ipcbuf.h>

@@ -13,14 +13,13 @@ static inline unsigned long native_save_fl(void)
	unsigned long flags;

	/*
	 * Note: this needs to be "=r" not "=rm", because we have the
	 * stack offset from what gcc expects at the time the "pop" is
	 * executed, and so a memory reference with respect to the stack
	 * would end up using the wrong address.
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=r" (flags)
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");
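The irqflags.h hunk above relaxes the output constraint from "=r" to "=rm"; the rewritten comment explains why: "pop" adjusts the stack pointer before computing its effective address, so a memory destination resolves correctly. The same pattern works stand-alone (a user-space sketch mirroring the new constraint, not the kernel function itself):

	static inline unsigned long save_flags(void)
	{
		unsigned long flags;

		/* "=rm": pop adjusts the stack before it evaluates its
		 * effective address, so a memory operand is valid here. */
		asm volatile("pushf ; pop %0"
			     : "=rm" (flags)
			     : /* no input */
			     : "memory");
		return flags;
	}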
@@ -17,6 +17,8 @@
#define __KVM_HAVE_USER_NMI
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_MSIX
#define __KVM_HAVE_MCE
#define __KVM_HAVE_PIT_STATE2

/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
@@ -236,6 +238,14 @@ struct kvm_pit_state {
	struct kvm_pit_channel_state channels[3];
};

#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001

struct kvm_pit_state2 {
	struct kvm_pit_channel_state channels[3];
	__u32 flags;
	__u32 reserved[9];
};

struct kvm_reinject_control {
	__u8 pit_reinject;
	__u8 reserved[31];

@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
@@ -37,12 +38,14 @@
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
				  0xFFFFFF0000000000ULL)

#define KVM_GUEST_CR0_MASK \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
	(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
#define KVM_GUEST_CR0_MASK \
	(KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
	(X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
#define KVM_VM_CR0_ALWAYS_ON \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
	 | X86_CR0_MP)
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_GUEST_CR4_MASK \
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
@@ -51,12 +54,12 @@
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

/* shadow tables are PAE even on non-PAE hosts */
#define KVM_HPAGE_SHIFT 21
#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))

#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES 3
#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + (((x) - 1) * 9))
#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)

#define DE_VECTOR 0
#define DB_VECTOR 1
@@ -120,6 +123,10 @@ enum kvm_reg {
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
@@ -131,7 +138,7 @@ enum {
	VCPU_SREG_LDTR,
};

#include <asm/kvm_x86_emulate.h>
#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

@@ -308,7 +315,6 @@ struct kvm_vcpu_arch {
	struct {
		gfn_t gfn; /* presumed gfn during guest pte update */
		pfn_t pfn; /* pfn corresponding to that gfn */
		int largepage;
		unsigned long mmu_seq;
	} update_pte;

@@ -334,16 +340,6 @@ struct kvm_vcpu_arch {
		u8 nr;
	} interrupt;

	struct {
		int vm86_active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
@@ -366,13 +362,15 @@ struct kvm_vcpu_arch {
	u32 pat;

	int switch_db_regs;
	unsigned long host_db[KVM_NR_DB_REGS];
	unsigned long host_dr6;
	unsigned long host_dr7;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 *mce_banks;
};

struct kvm_mem_alias {
@@ -409,6 +407,7 @@ struct kvm_arch{

	struct page *ept_identity_pagetable;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	unsigned long irq_sources_bitmap;
	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
@@ -526,6 +525,9 @@ struct kvm_x86_ops {
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	bool (*gb_page_enable)(void);

	const struct trace_print_flags *exit_reasons_str;
};

extern struct kvm_x86_ops *kvm_x86_ops;
@@ -618,6 +620,7 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
			   u32 error_code);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);

int kvm_pic_set_irq(void *opaque, int irq, int level);

@@ -752,8 +755,6 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
@@ -796,5 +797,8 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);

#endif /* _ASM_X86_KVM_HOST_H */

@@ -1,6 +1,8 @@
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <linux/types.h>

/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
 * should be used to determine that a VM is running under KVM.
 */

@@ -90,8 +90,9 @@ static inline void lguest_set_ts(void)
}

/* Full 4G segment descriptors, suitable for CS and DS. */
#define FULL_EXEC_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9b00} } })
#define FULL_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9300} } })
#define FULL_EXEC_SEGMENT \
	((struct desc_struct)GDT_ENTRY_INIT(0xc09b, 0, 0xfffff))
#define FULL_SEGMENT ((struct desc_struct)GDT_ENTRY_INIT(0xc093, 0, 0xfffff))

#endif /* __ASSEMBLY__ */

@@ -1,20 +1,8 @@
#ifndef _ASM_X86_MMAN_H
#define _ASM_X86_MMAN_H

#include <asm-generic/mman-common.h>

#define MAP_32BIT 0x40 /* only give out 32bit addresses */

#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
#define MAP_LOCKED 0x2000 /* pages are locked */
#define MAP_NORESERVE 0x4000 /* don't check for reservations */
#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */

#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
#include <asm-generic/mman.h>

#endif /* _ASM_X86_MMAN_H */

@@ -1,18 +1,7 @@
#ifndef _ASM_X86_MODULE_H
#define _ASM_X86_MODULE_H

/* x86_32/64 are simple */
struct mod_arch_specific {};

#ifdef CONFIG_X86_32
# define Elf_Shdr Elf32_Shdr
# define Elf_Sym Elf32_Sym
# define Elf_Ehdr Elf32_Ehdr
#else
# define Elf_Shdr Elf64_Shdr
# define Elf_Sym Elf64_Sym
# define Elf_Ehdr Elf64_Ehdr
#endif
#include <asm-generic/module.h>

#ifdef CONFIG_X86_64
/* X86_64 does not define MODULE_PROC_FAMILY */
@@ -28,6 +17,8 @@ struct mod_arch_specific {};
#define MODULE_PROC_FAMILY "586MMX "
#elif defined CONFIG_MCORE2
#define MODULE_PROC_FAMILY "CORE2 "
#elif defined CONFIG_MATOM
#define MODULE_PROC_FAMILY "ATOM "
#elif defined CONFIG_M686
#define MODULE_PROC_FAMILY "686 "
#elif defined CONFIG_MPENTIUMII

@@ -1,39 +1 @@
#ifndef _ASM_X86_MSGBUF_H
#define _ASM_X86_MSGBUF_H

/*
 * The msqid64_ds structure for i386 architecture.
 * Note extra padding because this structure is passed back and forth
 * between kernel and user space.
 *
 * Pad space on i386 is left for:
 * - 64-bit time_t to solve y2038 problem
 * - 2 miscellaneous 32-bit values
 *
 * Pad space on x8664 is left for:
 * - 2 miscellaneous 64-bit values
 */
struct msqid64_ds {
	struct ipc64_perm msg_perm;
	__kernel_time_t msg_stime; /* last msgsnd time */
#ifdef __i386__
	unsigned long __unused1;
#endif
	__kernel_time_t msg_rtime; /* last msgrcv time */
#ifdef __i386__
	unsigned long __unused2;
#endif
	__kernel_time_t msg_ctime; /* last change time */
#ifdef __i386__
	unsigned long __unused3;
#endif
	unsigned long msg_cbytes; /* current number of bytes on queue */
	unsigned long msg_qnum; /* number of messages in queue */
	unsigned long msg_qbytes; /* max number of bytes on queue */
	__kernel_pid_t msg_lspid; /* pid of last msgsnd */
	__kernel_pid_t msg_lrpid; /* last receive pid */
	unsigned long __unused4;
	unsigned long __unused5;
};

#endif /* _ASM_X86_MSGBUF_H */
#include <asm-generic/msgbuf.h>

@@ -374,6 +374,7 @@
/* AMD-V MSRs */

#define MSR_VM_CR 0xc0010114
#define MSR_VM_IGNNE 0xc0010115
#define MSR_VM_HSAVE_PA 0xc0010117

#endif /* _ASM_X86_MSR_INDEX_H */

@@ -3,10 +3,16 @@

#include <asm/msr-index.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/ioctl.h>

#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8])
#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8])

#ifdef __KERNEL__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
@@ -67,23 +73,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EFAULT));
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
							   int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %0,%0\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %3,%0 ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
		     : "c" (msr), [fault] "i" (-EIO));
	return EAX_EDX_VAL(val, low, high);
}

@@ -106,13 +96,16 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EFAULT)
		       [fault] "i" (-EIO)
		     : "memory");
	return err;
}

extern unsigned long long native_read_tsc(void);

extern int native_rdmsr_safe_regs(u32 regs[8]);
extern int native_wrmsr_safe_regs(u32 regs[8]);

static __always_inline unsigned long long __native_read_tsc(void)
{
	DECLARE_ARGS(val, low, high);
@@ -181,14 +174,44 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
	*p = native_read_msr_safe(msr, &err);
	return err;
}

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	*p = native_read_msr_amd_safe(msr, &err);
	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = native_rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return native_wrmsr_safe_regs(gprs);
}

static inline int rdmsr_safe_regs(u32 regs[8])
{
	return native_rdmsr_safe_regs(regs);
}

static inline int wrmsr_safe_regs(u32 regs[8])
{
	return native_wrmsr_safe_regs(regs);
}

#define rdtscl(low) \
	((low) = (u32)__native_read_tsc())

@@ -228,6 +251,8 @@ void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
@@ -258,7 +283,15 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */
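The new rdmsrl_amd_safe()/wrmsrl_amd_safe() above encode a small register-file convention in gprs[8], which can be read directly off the code: index 0 is %eax, 1 is %ecx (the MSR number), 2 is %edx, and 7 is %edi (the 0x9c5a203a AMD pass-code). A usage sketch of the same interface (SOME_AMD_MSR is a placeholder, not a real MSR name):

	u32 gprs[8] = { 0 };
	u64 val;
	int err;

	gprs[1] = SOME_AMD_MSR;			/* %ecx: MSR index */
	gprs[7] = 0x9c5a203a;			/* %edi: AMD pass-code */
	err = rdmsr_safe_regs(gprs);
	val = gprs[0] | ((u64)gprs[2] << 32);	/* edx:eax */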
@@ -121,6 +121,9 @@ extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
|
||||
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
|
||||
extern void mtrr_ap_init(void);
|
||||
extern void mtrr_bp_init(void);
|
||||
extern void set_mtrr_aps_delayed_init(void);
|
||||
extern void mtrr_aps_init(void);
|
||||
extern void mtrr_bp_restore(void);
|
||||
extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
|
||||
extern int amd_special_default_mtrr(void);
|
||||
# else
|
||||
@@ -161,6 +164,9 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
|
||||
|
||||
#define mtrr_ap_init() do {} while (0)
|
||||
#define mtrr_bp_init() do {} while (0)
|
||||
#define set_mtrr_aps_delayed_init() do {} while (0)
|
||||
#define mtrr_aps_init() do {} while (0)
|
||||
#define mtrr_bp_restore() do {} while (0)
|
||||
# endif
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
|
@@ -45,8 +45,8 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
|
||||
void __user *, size_t *, loff_t *);
|
||||
extern int unknown_nmi_panic;
|
||||
|
||||
void __trigger_all_cpu_backtrace(void);
|
||||
#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
|
||||
void arch_trigger_all_cpu_backtrace(void);
|
||||
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
|
||||
|
||||
static inline void localise_nmi_watchdog(void)
|
||||
{
|
||||
|
@@ -1,22 +1 @@
|
||||
#ifndef _ASM_X86_PARAM_H
|
||||
#define _ASM_X86_PARAM_H
|
||||
|
||||
#ifdef __KERNEL__
|
||||
# define HZ CONFIG_HZ /* Internal kernel timer frequency */
|
||||
# define USER_HZ 100 /* some user interfaces are */
|
||||
# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
|
||||
#endif
|
||||
|
||||
#ifndef HZ
|
||||
#define HZ 100
|
||||
#endif
|
||||
|
||||
#define EXEC_PAGESIZE 4096
|
||||
|
||||
#ifndef NOGROUP
|
||||
#define NOGROUP (-1)
|
||||
#endif
|
||||
|
||||
#define MAXHOSTNAMELEN 64 /* max length of hostname */
|
||||
|
||||
#endif /* _ASM_X86_PARAM_H */
|
||||
#include <asm-generic/param.h>
|
||||
|
@@ -7,689 +7,11 @@
|
||||
#include <asm/pgtable_types.h>
|
||||
#include <asm/asm.h>
|
||||
|
||||
/* Bitmask of what can be clobbered: usually at least eax. */
|
||||
#define CLBR_NONE 0
|
||||
#define CLBR_EAX (1 << 0)
|
||||
#define CLBR_ECX (1 << 1)
|
||||
#define CLBR_EDX (1 << 2)
|
||||
#define CLBR_EDI (1 << 3)
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/* CLBR_ANY should match all regs platform has. For i386, that's just it */
|
||||
#define CLBR_ANY ((1 << 4) - 1)
|
||||
|
||||
#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
|
||||
#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
|
||||
#define CLBR_SCRATCH (0)
|
||||
#else
|
||||
#define CLBR_RAX CLBR_EAX
|
||||
#define CLBR_RCX CLBR_ECX
|
||||
#define CLBR_RDX CLBR_EDX
|
||||
#define CLBR_RDI CLBR_EDI
|
||||
#define CLBR_RSI (1 << 4)
|
||||
#define CLBR_R8 (1 << 5)
|
||||
#define CLBR_R9 (1 << 6)
|
||||
#define CLBR_R10 (1 << 7)
|
||||
#define CLBR_R11 (1 << 8)
|
||||
|
||||
#define CLBR_ANY ((1 << 9) - 1)
|
||||
|
||||
#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
|
||||
CLBR_RCX | CLBR_R8 | CLBR_R9)
|
||||
#define CLBR_RET_REG (CLBR_RAX)
|
||||
#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
|
||||
|
||||
#include <asm/desc_defs.h>
|
||||
#endif /* X86_64 */
|
||||
|
||||
#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
|
||||
#include <asm/paravirt_types.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/types.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <asm/kmap_types.h>
|
||||
#include <asm/desc_defs.h>
|
||||
|
||||
struct page;
|
||||
struct thread_struct;
|
||||
struct desc_ptr;
|
||||
struct tss_struct;
|
||||
struct mm_struct;
|
||||
struct desc_struct;
|
||||
struct task_struct;
|
||||
|
||||
/*
|
||||
* Wrapper type for pointers to code which uses the non-standard
|
||||
* calling convention. See PV_CALL_SAVE_REGS_THUNK below.
|
||||
*/
|
||||
struct paravirt_callee_save {
|
||||
void *func;
|
||||
};
|
||||
|
||||
/* general info */
|
||||
struct pv_info {
|
||||
unsigned int kernel_rpl;
|
||||
int shared_kernel_pmd;
|
||||
int paravirt_enabled;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
struct pv_init_ops {
|
||||
/*
|
||||
* Patch may replace one of the defined code sequences with
|
||||
* arbitrary code, subject to the same register constraints.
|
||||
* This generally means the code is not free to clobber any
|
||||
* registers other than EAX. The patch function should return
|
||||
* the number of bytes of code generated, as we nop pad the
|
||||
* rest in generic code.
|
||||
*/
|
||||
unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
|
||||
unsigned long addr, unsigned len);
|
||||
|
||||
/* Basic arch-specific setup */
|
||||
void (*arch_setup)(void);
|
||||
char *(*memory_setup)(void);
|
||||
void (*post_allocator_init)(void);
|
||||
|
||||
/* Print a banner to identify the environment */
|
||||
void (*banner)(void);
|
||||
};
|
||||
|
||||
|
||||
struct pv_lazy_ops {
|
||||
/* Set deferred update mode, used for batching operations. */
|
||||
void (*enter)(void);
|
||||
void (*leave)(void);
|
||||
};
|
||||
|
||||
struct pv_time_ops {
|
||||
void (*time_init)(void);
|
||||
|
||||
/* Set and set time of day */
|
||||
unsigned long (*get_wallclock)(void);
|
||||
int (*set_wallclock)(unsigned long);
|
||||
|
||||
unsigned long long (*sched_clock)(void);
|
||||
unsigned long (*get_tsc_khz)(void);
|
||||
};
|
||||
|
||||
struct pv_cpu_ops {
|
||||
/* hooks for various privileged instructions */
|
||||
unsigned long (*get_debugreg)(int regno);
|
||||
void (*set_debugreg)(int regno, unsigned long value);
|
||||
|
||||
void (*clts)(void);
|
||||
|
||||
unsigned long (*read_cr0)(void);
|
||||
void (*write_cr0)(unsigned long);
|
||||
|
||||
unsigned long (*read_cr4_safe)(void);
|
||||
unsigned long (*read_cr4)(void);
|
||||
void (*write_cr4)(unsigned long);
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
unsigned long (*read_cr8)(void);
|
||||
void (*write_cr8)(unsigned long);
|
||||
#endif
|
||||
|
||||
/* Segment descriptor handling */
|
||||
void (*load_tr_desc)(void);
|
||||
void (*load_gdt)(const struct desc_ptr *);
|
||||
void (*load_idt)(const struct desc_ptr *);
|
||||
void (*store_gdt)(struct desc_ptr *);
|
||||
void (*store_idt)(struct desc_ptr *);
|
||||
void (*set_ldt)(const void *desc, unsigned entries);
|
||||
unsigned long (*store_tr)(void);
|
||||
void (*load_tls)(struct thread_struct *t, unsigned int cpu);
|
||||
#ifdef CONFIG_X86_64
|
||||
void (*load_gs_index)(unsigned int idx);
|
||||
#endif
|
||||
void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
|
||||
const void *desc);
|
||||
void (*write_gdt_entry)(struct desc_struct *,
|
||||
int entrynum, const void *desc, int size);
|
||||
void (*write_idt_entry)(gate_desc *,
|
||||
int entrynum, const gate_desc *gate);
|
||||
void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
|
||||
void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
|
||||
|
||||
void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
|
||||
|
||||
void (*set_iopl_mask)(unsigned mask);
|
||||
|
||||
void (*wbinvd)(void);
|
||||
void (*io_delay)(void);
|
||||
|
||||
/* cpuid emulation, mostly so that caps bits can be disabled */
|
||||
void (*cpuid)(unsigned int *eax, unsigned int *ebx,
|
||||
unsigned int *ecx, unsigned int *edx);
|
||||
|
||||
/* MSR, PMC and TSR operations.
|
||||
err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
|
||||
u64 (*read_msr_amd)(unsigned int msr, int *err);
|
||||
u64 (*read_msr)(unsigned int msr, int *err);
|
||||
int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
|
||||
|
||||
u64 (*read_tsc)(void);
|
||||
u64 (*read_pmc)(int counter);
|
||||
unsigned long long (*read_tscp)(unsigned int *aux);
|
||||
|
||||
/*
|
||||
* Atomically enable interrupts and return to userspace. This
|
||||
* is only ever used to return to 32-bit processes; in a
|
||||
* 64-bit kernel, it's used for 32-on-64 compat processes, but
|
||||
* never native 64-bit processes. (Jump, not call.)
|
||||
*/
|
||||
void (*irq_enable_sysexit)(void);
|
||||
|
||||
/*
|
||||
* Switch to usermode gs and return to 64-bit usermode using
|
||||
* sysret. Only used in 64-bit kernels to return to 64-bit
|
||||
* processes. Usermode register state, including %rsp, must
|
||||
* already be restored.
|
||||
*/
|
||||
void (*usergs_sysret64)(void);
|
||||
|
||||
/*
|
||||
* Switch to usermode gs and return to 32-bit usermode using
|
||||
* sysret. Used to return to 32-on-64 compat processes.
|
||||
* Other usermode register state, including %esp, must already
|
||||
* be restored.
|
||||
*/
|
||||
void (*usergs_sysret32)(void);
|
||||
|
||||
/* Normal iret. Jump to this with the standard iret stack
|
||||
frame set up. */
|
||||
void (*iret)(void);
|
||||
|
||||
void (*swapgs)(void);
|
||||
|
||||
void (*start_context_switch)(struct task_struct *prev);
|
||||
void (*end_context_switch)(struct task_struct *next);
|
||||
};
|
||||
|
||||
struct pv_irq_ops {
|
||||
void (*init_IRQ)(void);
|
||||
|
||||
/*
|
||||
* Get/set interrupt state. save_fl and restore_fl are only
|
||||
* expected to use X86_EFLAGS_IF; all other bits
|
||||
* returned from save_fl are undefined, and may be ignored by
|
||||
* restore_fl.
|
||||
*
|
||||
* NOTE: These functions callers expect the callee to preserve
|
||||
* more registers than the standard C calling convention.
|
||||
*/
|
||||
struct paravirt_callee_save save_fl;
|
||||
struct paravirt_callee_save restore_fl;
|
||||
struct paravirt_callee_save irq_disable;
|
||||
struct paravirt_callee_save irq_enable;
|
||||
|
||||
void (*safe_halt)(void);
|
||||
void (*halt)(void);
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
void (*adjust_exception_frame)(void);
|
||||
#endif
|
||||
};
|
||||
|
||||
struct pv_apic_ops {
|
||||
#ifdef CONFIG_X86_LOCAL_APIC
|
||||
void (*setup_boot_clock)(void);
|
||||
void (*setup_secondary_clock)(void);
|
||||
|
||||
void (*startup_ipi_hook)(int phys_apicid,
|
||||
unsigned long start_eip,
|
||||
unsigned long start_esp);
|
||||
#endif
|
||||
};
|
||||
|
||||
struct pv_mmu_ops {
|
||||
/*
|
||||
* Called before/after init_mm pagetable setup. setup_start
|
||||
* may reset %cr3, and may pre-install parts of the pagetable;
|
||||
* pagetable setup is expected to preserve any existing
|
||||
* mapping.
|
||||
*/
|
||||
void (*pagetable_setup_start)(pgd_t *pgd_base);
|
||||
void (*pagetable_setup_done)(pgd_t *pgd_base);
|
||||
|
||||
unsigned long (*read_cr2)(void);
|
||||
void (*write_cr2)(unsigned long);
|
||||
|
||||
unsigned long (*read_cr3)(void);
|
||||
void (*write_cr3)(unsigned long);
|
||||
|
||||
/*
|
||||
* Hooks for intercepting the creation/use/destruction of an
|
||||
* mm_struct.
|
||||
*/
|
||||
void (*activate_mm)(struct mm_struct *prev,
|
||||
struct mm_struct *next);
|
||||
void (*dup_mmap)(struct mm_struct *oldmm,
|
||||
struct mm_struct *mm);
|
||||
void (*exit_mmap)(struct mm_struct *mm);
|
||||
|
||||
|
||||
/* TLB operations */
|
||||
void (*flush_tlb_user)(void);
|
||||
void (*flush_tlb_kernel)(void);
|
||||
void (*flush_tlb_single)(unsigned long addr);
|
||||
void (*flush_tlb_others)(const struct cpumask *cpus,
|
||||
struct mm_struct *mm,
|
||||
unsigned long va);
|
||||
|
||||
/* Hooks for allocating and freeing a pagetable top-level */
|
||||
int (*pgd_alloc)(struct mm_struct *mm);
|
||||
void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
|
||||
|
||||
/*
|
||||
* Hooks for allocating/releasing pagetable pages when they're
|
||||
* attached to a pagetable
|
||||
*/
|
||||
void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
|
||||
void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
|
||||
void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
|
||||
void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
|
||||
void (*release_pte)(unsigned long pfn);
|
||||
void (*release_pmd)(unsigned long pfn);
|
||||
void (*release_pud)(unsigned long pfn);
|
||||
|
||||
/* Pagetable manipulation functions */
|
||||
void (*set_pte)(pte_t *ptep, pte_t pteval);
|
||||
void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, pte_t pteval);
|
||||
void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
|
||||
void (*pte_update)(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep);
|
||||
void (*pte_update_defer)(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep);
|
||||
|
||||
pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep);
|
||||
void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, pte_t pte);
|
||||
|
||||
struct paravirt_callee_save pte_val;
|
||||
struct paravirt_callee_save make_pte;
|
||||
|
||||
struct paravirt_callee_save pgd_val;
|
||||
struct paravirt_callee_save make_pgd;
|
||||
|
||||
#if PAGETABLE_LEVELS >= 3
|
||||
#ifdef CONFIG_X86_PAE
|
||||
void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
|
||||
void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep);
|
||||
void (*pmd_clear)(pmd_t *pmdp);
|
||||
|
||||
#endif /* CONFIG_X86_PAE */
|
||||
|
||||
void (*set_pud)(pud_t *pudp, pud_t pudval);
|
||||
|
||||
struct paravirt_callee_save pmd_val;
|
||||
struct paravirt_callee_save make_pmd;
|
||||
|
||||
#if PAGETABLE_LEVELS == 4
|
||||
struct paravirt_callee_save pud_val;
|
||||
struct paravirt_callee_save make_pud;
|
||||
|
||||
void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
|
||||
#endif /* PAGETABLE_LEVELS == 4 */
|
||||
#endif /* PAGETABLE_LEVELS >= 3 */
|
||||
|
||||
#ifdef CONFIG_HIGHPTE
|
||||
void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
|
||||
#endif
|
||||
|
||||
struct pv_lazy_ops lazy_mode;
|
||||
|
||||
/* dom0 ops */
|
||||
|
||||
/* Sometimes the physical address is a pfn, and sometimes its
|
||||
an mfn. We can tell which is which from the index. */
|
||||
void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
|
||||
phys_addr_t phys, pgprot_t flags);
|
||||
};
|
||||
|
||||
struct raw_spinlock;
|
||||
struct pv_lock_ops {
|
||||
int (*spin_is_locked)(struct raw_spinlock *lock);
|
||||
int (*spin_is_contended)(struct raw_spinlock *lock);
|
||||
void (*spin_lock)(struct raw_spinlock *lock);
|
||||
void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
|
||||
int (*spin_trylock)(struct raw_spinlock *lock);
|
||||
void (*spin_unlock)(struct raw_spinlock *lock);
|
||||
};
|
||||
|
||||
/* This contains all the paravirt structures: we get a convenient
|
||||
* number for each function using the offset which we use to indicate
|
||||
* what to patch. */
|
||||
struct paravirt_patch_template {
|
||||
struct pv_init_ops pv_init_ops;
|
||||
struct pv_time_ops pv_time_ops;
|
||||
struct pv_cpu_ops pv_cpu_ops;
|
||||
struct pv_irq_ops pv_irq_ops;
|
||||
struct pv_apic_ops pv_apic_ops;
|
||||
struct pv_mmu_ops pv_mmu_ops;
|
||||
struct pv_lock_ops pv_lock_ops;
|
||||
};
|
||||
|
||||
extern struct pv_info pv_info;
|
||||
extern struct pv_init_ops pv_init_ops;
|
||||
extern struct pv_time_ops pv_time_ops;
|
||||
extern struct pv_cpu_ops pv_cpu_ops;
|
||||
extern struct pv_irq_ops pv_irq_ops;
|
||||
extern struct pv_apic_ops pv_apic_ops;
|
||||
extern struct pv_mmu_ops pv_mmu_ops;
|
||||
extern struct pv_lock_ops pv_lock_ops;
|
||||
|
||||
#define PARAVIRT_PATCH(x) \
|
||||
(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
|
||||
|
||||
#define paravirt_type(op) \
|
||||
[paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
|
||||
[paravirt_opptr] "i" (&(op))
|
||||
#define paravirt_clobber(clobber) \
|
||||
[paravirt_clobber] "i" (clobber)
|
||||
|
||||
/*
|
||||
* Generate some code, and mark it as patchable by the
|
||||
* apply_paravirt() alternate instruction patcher.
|
||||
*/
|
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	" .byte " type "\n"				\
	" .byte 772b-771b\n"				\
	" .short " clobber "\n"				\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL	"call *%c[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS				\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
#define PVOP_VCALL_ARGS					\
	unsigned long __edi = __edi, __esi = __esi,	\
		__edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif

#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,		\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})

#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)


#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : call_clbr				\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
	})

#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		       PVOP_VCALLEE_CLOBBERS, ,				\
		       pre, post, ##__VA_ARGS__)



#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")
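A wrapper built on these macros looks like the following sketch, in the style of the inline wrappers paravirt.h defines (pv_mmu_ops.read_cr2 is one of the hooks declared in the ops structures elsewhere in this commit):

static inline unsigned long read_cr2(void)
{
	/* Patchable indirect call; the result comes back in %eax/%rax. */
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}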

#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "",					\
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif
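One added note on the i386 variants above:

/*
 * regparm(3) covers only three register arguments, so the "pre"
 * fragment pushes arg4 onto the stack before the patchable call, and
 * the "post" fragment's "lea 4(%esp),%esp" pops it again; lea is used
 * rather than "add $4, %esp" because it leaves the flags untouched.
 */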

static inline int paravirt_enabled(void)
{
@@ -820,15 +142,22 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
static inline int paravirt_rdmsr_regs(u32 *regs)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
	return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
	return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
@@ -862,6 +191,9 @@ do { \
	_err;					\
})

#define rdmsr_safe_regs(regs)	paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs)	paravirt_wrmsr_regs(regs)

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;
@@ -871,12 +203,31 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	*p = paravirt_read_msr_amd(msr, &err);
	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = paravirt_rdmsr_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return paravirt_wrmsr_regs(gprs);
}
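A note on the gprs[] convention used by both helpers above (added commentary; the register mapping beyond what the code itself shows is an assumption based on the msr-reg.S slot layout):

/*
 * gprs[] mirrors the general registers for the rdmsr/wrmsr_regs hooks:
 * gprs[1] carries the MSR number (%ecx), gprs[0]/gprs[2] carry the
 * low/high value halves (%eax/%edx), and gprs[7] holds 0x9c5a203a,
 * the passcode AMD MSRs require (in %edi).
 */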

static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
@@ -1393,20 +744,6 @@ static inline void pmd_clear(pmd_t *pmdp)
}
#endif	/* CONFIG_X86_PAE */

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
@@ -1437,12 +774,6 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
@@ -1479,17 +810,6 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)

#endif

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

arch/x86/include/asm/paravirt_types.h (new file, 721 lines)
@@ -0,0 +1,721 @@
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all regs platform has. For i386, that's just it */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)

#endif /* X86_64 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/kmap_types.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALL_SAVE_REGS_THUNK below.
 */
struct paravirt_callee_save {
	void *func;
};

/* general info */
struct pv_info {
	unsigned int kernel_rpl;
	int shared_kernel_pmd;
	int paravirt_enabled;
	const char *name;
};

struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);

	/* Basic arch-specific setup */
	void (*arch_setup)(void);
	char *(*memory_setup)(void);
	void (*post_allocator_init)(void);

	/* Print a banner to identify the environment */
	void (*banner)(void);
};
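A backend that needs no special-casing can defer entirely to the generic patcher; a minimal sketch (the function name is hypothetical, and the declarations it relies on appear later in this file):

static unsigned my_backend_patch(u8 type, u16 clobbers, void *insnbuf,
				 unsigned long addr, unsigned len)
{
	/* Fall back to the generic direct-call/nop patching. */
	return paravirt_patch_default(type, clobbers, insnbuf, addr, len);
}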


struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
};

struct pv_time_ops {
	void (*time_init)(void);

	/* Get and set time of day */
	unsigned long (*get_wallclock)(void);
	int (*set_wallclock)(unsigned long);

	unsigned long long (*sched_clock)(void);
	unsigned long (*get_tsc_khz)(void);
};

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	void (*clts)(void);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	unsigned long (*read_cr4_safe)(void);
	unsigned long (*read_cr4)(void);
	void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
	unsigned long (*read_cr8)(void);
	void (*write_cr8)(unsigned long);
#endif

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*store_gdt)(struct desc_ptr *);
	void (*store_idt)(struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

	void (*set_iopl_mask)(unsigned mask);

	void (*wbinvd)(void);
	void (*io_delay)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* MSR, PMC and TSC operations.
	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
	u64 (*read_msr)(unsigned int msr, int *err);
	int (*rdmsr_regs)(u32 *regs);
	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
	int (*wrmsr_regs)(u32 *regs);

	u64 (*read_tsc)(void);
	u64 (*read_pmc)(int counter);
	unsigned long long (*read_tscp)(unsigned int *aux);

	/*
	 * Atomically enable interrupts and return to userspace.  This
	 * is only ever used to return to 32-bit processes; in a
	 * 64-bit kernel, it's used for 32-on-64 compat processes, but
	 * never native 64-bit processes.  (Jump, not call.)
	 */
	void (*irq_enable_sysexit)(void);

	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);

	/*
	 * Switch to usermode gs and return to 32-bit usermode using
	 * sysret.  Used to return to 32-on-64 compat processes.
	 * Other usermode register state, including %esp, must already
	 * be restored.
	 */
	void (*usergs_sysret32)(void);

	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
};

struct pv_irq_ops {
	void (*init_IRQ)(void);

	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 *
	 * NOTE: Callers of these functions expect the callee to preserve
	 * more registers than the standard C calling convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);

#ifdef CONFIG_X86_64
	void (*adjust_exception_frame)(void);
#endif
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
	void (*setup_boot_clock)(void);
	void (*setup_secondary_clock)(void);

	void (*startup_ipi_hook)(int phys_apicid,
				 unsigned long start_eip,
				 unsigned long start_esp);
#endif
};

struct pv_mmu_ops {
	/*
	 * Called before/after init_mm pagetable setup. setup_start
	 * may reset %cr3, and may pre-install parts of the pagetable;
	 * pagetable setup is expected to preserve any existing
	 * mapping.
	 */
	void (*pagetable_setup_start)(pgd_t *pgd_base);
	void (*pagetable_setup_done)(pgd_t *pgd_base);

	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/*
	 * Hooks for intercepting the creation/use/destruction of an
	 * mm_struct.
	 */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);


	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_single)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 struct mm_struct *mm,
				 unsigned long va);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep);
	void (*pte_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep);

	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);

#endif	/* CONFIG_X86_PAE */

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

#if PAGETABLE_LEVELS == 4
	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
#endif	/* PAGETABLE_LEVELS == 4 */
#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_HIGHPTE
	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
};

struct raw_spinlock;
struct pv_lock_ops {
	int (*spin_is_locked)(struct raw_spinlock *lock);
	int (*spin_is_contended)(struct raw_spinlock *lock);
	void (*spin_lock)(struct raw_spinlock *lock);
	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
	int (*spin_trylock)(struct raw_spinlock *lock);
	void (*spin_unlock)(struct raw_spinlock *lock);
};

/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_apic_ops pv_apic_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "i" (&(op))
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)
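Because paravirt_patch_template is laid out as consecutive function pointers, the type number converts back to an op pointer by plain slot indexing; a sketch of that inverse (the helper name is illustrative):

static void *get_op_by_type(struct paravirt_patch_template *tmpl, u8 type)
{
	return *((void **)tmpl + type);	/* inverse of PARAVIRT_PATCH() */
}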

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	" .byte " type "\n"				\
	" .byte 772b-771b\n"				\
	" .short " clobber "\n"				\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL	"call *%c[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS				\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
#define PVOP_VCALL_ARGS					\
	unsigned long __edi = __edi, __esi = __esi,	\
		__edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif

#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,		\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})

#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)


#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : call_clbr				\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
	})

#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		       PVOP_VCALLEE_CLOBBERS, ,				\
		       pre, post, ##__VA_ARGS__)



#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "",					\
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);

void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];

#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PARAVIRT_TYPES_H */

@@ -19,4 +19,9 @@ extern int free_memtype(u64 start, u64 end);
extern int kernel_map_sync_memtype(u64 base, unsigned long size,
		unsigned long flag);

int io_reserve_memtype(resource_size_t start, resource_size_t end,
			unsigned long *type);

void io_free_memtype(resource_size_t start, resource_size_t end);

#endif /* _ASM_X86_PAT_H */

@@ -48,7 +48,6 @@ extern unsigned int pcibios_assign_all_busses(void);
#else
#define pcibios_assign_all_busses()	0
#endif
#define pcibios_scan_all_fns(a, b)	0

extern unsigned long pci_mem_start;
#define PCIBIOS_MIN_IO		0x1000

@@ -49,7 +49,7 @@
#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset		percpu_read(this_cpu_off)
#else
#define __percpu_arg(x)		"%" #x
#define __percpu_arg(x)		"%P" #x
#endif

/*
@@ -104,36 +104,48 @@ do { \
	} \
} while (0)

#define percpu_from_op(op, var) \
#define percpu_from_op(op, var, constraint) \
({ \
	typeof(var) ret__; \
	switch (sizeof(var)) { \
	case 1: \
		asm(op "b "__percpu_arg(1)",%0" \
		    : "=q" (ret__) \
		    : "m" (var)); \
		    : constraint); \
		break; \
	case 2: \
		asm(op "w "__percpu_arg(1)",%0" \
		    : "=r" (ret__) \
		    : "m" (var)); \
		    : constraint); \
		break; \
	case 4: \
		asm(op "l "__percpu_arg(1)",%0" \
		    : "=r" (ret__) \
		    : "m" (var)); \
		    : constraint); \
		break; \
	case 8: \
		asm(op "q "__percpu_arg(1)",%0" \
		    : "=r" (ret__) \
		    : "m" (var)); \
		    : constraint); \
		break; \
	default: __bad_percpu_size(); \
	} \
	ret__; \
})

#define percpu_read(var)	percpu_from_op("mov", per_cpu__##var)
/*
 * percpu_read() makes gcc load the percpu variable every time it is
 * accessed while percpu_read_stable() allows the value to be cached.
 * percpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info() both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define percpu_read(var)	percpu_from_op("mov", per_cpu__##var,	\
					       "m" (per_cpu__##var))
#define percpu_read_stable(var)	percpu_from_op("mov", per_cpu__##var,	\
					       "p" (&per_cpu__##var))
#define percpu_write(var, val)	percpu_to_op("mov", per_cpu__##var, val)
#define percpu_add(var, val)	percpu_to_op("add", per_cpu__##var, val)
#define percpu_sub(var, val)	percpu_to_op("sub", per_cpu__##var, val)
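The intended pattern is a per-thread value that never changes for the task's lifetime, as in get_current(); a sketch of that pattern (current_task is the x86 per-cpu task pointer, and the wrapper name here is illustrative):

static inline struct task_struct *get_current_sketch(void)
{
	/* Safe to cache: the value is stable for the running task. */
	return percpu_read_stable(current_task);
}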

@@ -156,15 +168,6 @@ do { \
/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#ifdef CONFIG_NEED_MULTIPLE_NODES
void *pcpu_lpage_remapped(void *kaddr);
#else
static inline void *pcpu_lpage_remapped(void *kaddr)
{
	return NULL;
}
#endif

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

@@ -84,6 +84,16 @@ union cpuid10_edx {
#define MSR_ARCH_PERFMON_FIXED_CTR2			0x30b
#define X86_PMC_IDX_FIXED_BUS_CYCLES			(X86_PMC_IDX_FIXED + 2)

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed counter range, since lower
 * values are used by actual fixed counters and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)
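Spelled out with the usual x86 values (X86_PMC_IDX_FIXED = 32 and X86_PMC_IDX_MAX = 64; neither constant appears in this hunk, so treat the arithmetic as an assumption):

/* BTS index = 32 + 16 = 48, i.e. halfway through the fixed range [32, 64). */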

#ifdef CONFIG_PERF_COUNTERS
extern void init_hw_perf_counters(void);
extern void perf_counters_lapic_init(void);

@@ -135,6 +135,11 @@ static inline unsigned long pte_pfn(pte_t pte)
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
@@ -359,7 +364,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned pmd_index(unsigned long address)
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
@@ -379,7 +384,7 @@ static inline unsigned pmd_index(unsigned long address)
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned pte_index(unsigned long address)
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
@@ -430,11 +435,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -470,7 +470,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned pud_index(unsigned long address)
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

@@ -403,7 +403,17 @@ extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, stack_canary);
/*
 * Make sure the stack canary segment base is cache-line aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
#endif	/* X86_64 */

@@ -703,13 +713,23 @@ static inline void cpu_relax(void)
	rep_nop();
}

/* Stop speculative execution: */
/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#if defined(CONFIG_M386) || defined(CONFIG_M486)
	if (boot_cpu_data.x86 < 5)
		/* There is no speculative execution.
		 * jmp is a barrier to prefetching. */
		asm volatile("jmp 1f\n1:\n" ::: "memory");
	else
#endif
		/* cpuid is a barrier to speculative execution.
		 * Prefetched instructions are automatically
		 * invalidated when modified. */
		asm volatile("cpuid" : "=a" (tmp) : "0" (1)
			     : "ebx", "ecx", "edx", "memory");
}
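A hedged usage note for sync_core(), added here as commentary:

/*
 * Added note: cross-modifying-code paths (e.g. the alternatives and
 * text_poke machinery) run sync_core() on a CPU after kernel text has
 * been rewritten, so that CPU discards stale prefetched instruction
 * bytes before executing the patched site.
 */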

static inline void __monitor(const void *eax, unsigned long ecx,

@@ -1,33 +1,8 @@
#ifndef _ASM_X86_SCATTERLIST_H
#define _ASM_X86_SCATTERLIST_H

#include <asm/types.h>

struct scatterlist {
#ifdef CONFIG_DEBUG_SG
	unsigned long	sg_magic;
#endif
	unsigned long	page_link;
	unsigned int	offset;
	unsigned int	length;
	dma_addr_t	dma_address;
	unsigned int	dma_length;
};

#define ARCH_HAS_SG_CHAIN
#define ISA_DMA_THRESHOLD (0x00ffffff)

/*
 * These macros should be used after a pci_map_sg call has been done
 * to get bus addresses of each of the SG entries and their lengths.
 * You should only work with the number of sg entries pci_map_sg
 * returns.
 */
#define sg_dma_address(sg)	((sg)->dma_address)
#ifdef CONFIG_X86_32
# define sg_dma_len(sg)		((sg)->length)
#else
# define sg_dma_len(sg)		((sg)->dma_length)
#endif
#include <asm-generic/scatterlist.h>

#endif /* _ASM_X86_SCATTERLIST_H */

@@ -1,51 +1 @@
#ifndef _ASM_X86_SHMBUF_H
#define _ASM_X86_SHMBUF_H

/*
 * The shmid64_ds structure for x86 architecture.
 * Note extra padding because this structure is passed back and forth
 * between kernel and user space.
 *
 * Pad space on 32 bit is left for:
 * - 64-bit time_t to solve y2038 problem
 * - 2 miscellaneous 32-bit values
 *
 * Pad space on 64 bit is left for:
 * - 2 miscellaneous 64-bit values
 */

struct shmid64_ds {
	struct ipc64_perm	shm_perm;	/* operation perms */
	size_t			shm_segsz;	/* size of segment (bytes) */
	__kernel_time_t		shm_atime;	/* last attach time */
#ifdef __i386__
	unsigned long		__unused1;
#endif
	__kernel_time_t		shm_dtime;	/* last detach time */
#ifdef __i386__
	unsigned long		__unused2;
#endif
	__kernel_time_t		shm_ctime;	/* last change time */
#ifdef __i386__
	unsigned long		__unused3;
#endif
	__kernel_pid_t		shm_cpid;	/* pid of creator */
	__kernel_pid_t		shm_lpid;	/* pid of last operator */
	unsigned long		shm_nattch;	/* no. of current attaches */
	unsigned long		__unused4;
	unsigned long		__unused5;
};

struct shminfo64 {
	unsigned long	shmmax;
	unsigned long	shmmin;
	unsigned long	shmmni;
	unsigned long	shmseg;
	unsigned long	shmall;
	unsigned long	__unused1;
	unsigned long	__unused2;
	unsigned long	__unused3;
	unsigned long	__unused4;
};

#endif /* _ASM_X86_SHMBUF_H */
#include <asm-generic/shmbuf.h>

@@ -1,60 +1 @@
#ifndef _ASM_X86_SOCKET_H
#define _ASM_X86_SOCKET_H

#include <asm/sockios.h>

/* For setsockopt(2) */
#define SOL_SOCKET	1

#define SO_DEBUG	1
#define SO_REUSEADDR	2
#define SO_TYPE		3
#define SO_ERROR	4
#define SO_DONTROUTE	5
#define SO_BROADCAST	6
#define SO_SNDBUF	7
#define SO_RCVBUF	8
#define SO_SNDBUFFORCE	32
#define SO_RCVBUFFORCE	33
#define SO_KEEPALIVE	9
#define SO_OOBINLINE	10
#define SO_NO_CHECK	11
#define SO_PRIORITY	12
#define SO_LINGER	13
#define SO_BSDCOMPAT	14
/* To add :#define SO_REUSEPORT 15 */
#define SO_PASSCRED	16
#define SO_PEERCRED	17
#define SO_RCVLOWAT	18
#define SO_SNDLOWAT	19
#define SO_RCVTIMEO	20
#define SO_SNDTIMEO	21

/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION		22
#define SO_SECURITY_ENCRYPTION_TRANSPORT	23
#define SO_SECURITY_ENCRYPTION_NETWORK		24

#define SO_BINDTODEVICE	25

/* Socket filtering */
#define SO_ATTACH_FILTER	26
#define SO_DETACH_FILTER	27

#define SO_PEERNAME		28
#define SO_TIMESTAMP		29
#define SCM_TIMESTAMP		SO_TIMESTAMP

#define SO_ACCEPTCONN		30

#define SO_PEERSEC		31
#define SO_PASSSEC		34
#define SO_TIMESTAMPNS		35
#define SCM_TIMESTAMPNS		SO_TIMESTAMPNS

#define SO_MARK			36

#define SO_TIMESTAMPING		37
#define SCM_TIMESTAMPING	SO_TIMESTAMPING

#endif /* _ASM_X86_SOCKET_H */
#include <asm-generic/socket.h>

@@ -1,13 +1 @@
#ifndef _ASM_X86_SOCKIOS_H
#define _ASM_X86_SOCKIOS_H

/* Socket-level I/O control calls. */
#define FIOSETOWN	0x8901
#define SIOCSPGRP	0x8902
#define FIOGETOWN	0x8903
#define SIOCGPGRP	0x8904
#define SIOCATMARK	0x8905
#define SIOCGSTAMP	0x8906 /* Get stamp (timeval) */
#define SIOCGSTAMPNS	0x8907 /* Get stamp (timespec) */

#endif /* _ASM_X86_SOCKIOS_H */
#include <asm-generic/sockios.h>

@@ -48,7 +48,7 @@
 * head_32 for boot CPU and setup_per_cpu_areas() for others.
 */
#define GDT_STACK_CANARY_INIT						\
	[GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } },
	[GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),

/*
 * Initialize the stackprotector canary value.
@@ -78,21 +78,19 @@ static __always_inline void boot_init_stack_canary(void)
#ifdef CONFIG_X86_64
	percpu_write(irq_stack_union.stack_canary, canary);
#else
	percpu_write(stack_canary, canary);
	percpu_write(stack_canary.canary, canary);
#endif
}

static inline void setup_stack_canary_segment(int cpu)
{
#ifdef CONFIG_X86_32
	unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
	unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
	struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
	struct desc_struct desc;

	desc = gdt_table[GDT_ENTRY_STACK_CANARY];
	desc.base0 = canary & 0xffff;
	desc.base1 = (canary >> 16) & 0xff;
	desc.base2 = (canary >> 24) & 0xff;
	set_desc_base(&desc, canary);
	write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
#endif
}
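For reference, set_desc_base() packs the 32-bit base into the same split descriptor fields the removed open-coded lines wrote by hand; a sketch of its effect (mirroring the desc_defs.h helper, named differently here):

static inline void set_desc_base_sketch(struct desc_struct *desc,
					unsigned long base)
{
	desc->base0 = base & 0xffff;		/* bits 15..0  */
	desc->base1 = (base >> 16) & 0xff;	/* bits 23..16 */
	desc->base2 = (base >> 24) & 0xff;	/* bits 31..24 */
}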
|
||||
|
@@ -31,7 +31,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
|
||||
"movl %P[task_canary](%[next]), %%ebx\n\t" \
|
||||
"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
|
||||
#define __switch_canary_oparam \
|
||||
, [stack_canary] "=m" (per_cpu_var(stack_canary))
|
||||
, [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
|
||||
#define __switch_canary_iparam \
|
||||
, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
|
||||
#else /* CC_STACKPROTECTOR */
|
||||
@@ -150,33 +150,6 @@ do { \
|
||||
#endif
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#define _set_base(addr, base) do { unsigned long __pr; \
|
||||
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
|
||||
"rorl $16,%%edx\n\t" \
|
||||
"movb %%dl,%2\n\t" \
|
||||
"movb %%dh,%3" \
|
||||
:"=&d" (__pr) \
|
||||
:"m" (*((addr)+2)), \
|
||||
"m" (*((addr)+4)), \
|
||||
"m" (*((addr)+7)), \
|
||||
"0" (base) \
|
||||
); } while (0)
|
||||
|
||||
#define _set_limit(addr, limit) do { unsigned long __lr; \
|
||||
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
|
||||
"rorl $16,%%edx\n\t" \
|
||||
"movb %2,%%dh\n\t" \
|
||||
"andb $0xf0,%%dh\n\t" \
|
||||
"orb %%dh,%%dl\n\t" \
|
||||
"movb %%dl,%2" \
|
||||
:"=&d" (__lr) \
|
||||
:"m" (*(addr)), \
|
||||
"m" (*((addr)+6)), \
|
||||
"0" (limit) \
|
||||
); } while (0)
|
||||
|
||||
#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
|
||||
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
|
||||
|
||||
extern void native_load_gs_index(unsigned);
|
||||
|
||||
|
@@ -1,198 +1 @@
-#ifndef _ASM_X86_TERMBITS_H
-#define _ASM_X86_TERMBITS_H
-
-#include <linux/posix_types.h>
-
-typedef unsigned char	cc_t;
-typedef unsigned int	speed_t;
-typedef unsigned int	tcflag_t;
-
-#define NCCS 19
-struct termios {
-	tcflag_t c_iflag;		/* input mode flags */
-	tcflag_t c_oflag;		/* output mode flags */
-	tcflag_t c_cflag;		/* control mode flags */
-	tcflag_t c_lflag;		/* local mode flags */
-	cc_t c_line;			/* line discipline */
-	cc_t c_cc[NCCS];		/* control characters */
-};
-
-struct termios2 {
-	tcflag_t c_iflag;		/* input mode flags */
-	tcflag_t c_oflag;		/* output mode flags */
-	tcflag_t c_cflag;		/* control mode flags */
-	tcflag_t c_lflag;		/* local mode flags */
-	cc_t c_line;			/* line discipline */
-	cc_t c_cc[NCCS];		/* control characters */
-	speed_t c_ispeed;		/* input speed */
-	speed_t c_ospeed;		/* output speed */
-};
-
-struct ktermios {
-	tcflag_t c_iflag;		/* input mode flags */
-	tcflag_t c_oflag;		/* output mode flags */
-	tcflag_t c_cflag;		/* control mode flags */
-	tcflag_t c_lflag;		/* local mode flags */
-	cc_t c_line;			/* line discipline */
-	cc_t c_cc[NCCS];		/* control characters */
-	speed_t c_ispeed;		/* input speed */
-	speed_t c_ospeed;		/* output speed */
-};
-
-/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
-/* c_iflag bits */
-#define IGNBRK	0000001
-#define BRKINT	0000002
-#define IGNPAR	0000004
-#define PARMRK	0000010
-#define INPCK	0000020
-#define ISTRIP	0000040
-#define INLCR	0000100
-#define IGNCR	0000200
-#define ICRNL	0000400
-#define IUCLC	0001000
-#define IXON	0002000
-#define IXANY	0004000
-#define IXOFF	0010000
-#define IMAXBEL	0020000
-#define IUTF8	0040000
-
-/* c_oflag bits */
-#define OPOST	0000001
-#define OLCUC	0000002
-#define ONLCR	0000004
-#define OCRNL	0000010
-#define ONOCR	0000020
-#define ONLRET	0000040
-#define OFILL	0000100
-#define OFDEL	0000200
-#define NLDLY	0000400
-#define   NL0	0000000
-#define   NL1	0000400
-#define CRDLY	0003000
-#define   CR0	0000000
-#define   CR1	0001000
-#define   CR2	0002000
-#define   CR3	0003000
-#define TABDLY	0014000
-#define   TAB0	0000000
-#define   TAB1	0004000
-#define   TAB2	0010000
-#define   TAB3	0014000
-#define   XTABS	0014000
-#define BSDLY	0020000
-#define   BS0	0000000
-#define   BS1	0020000
-#define VTDLY	0040000
-#define   VT0	0000000
-#define   VT1	0040000
-#define FFDLY	0100000
-#define   FF0	0000000
-#define   FF1	0100000
-
-/* c_cflag bit meaning */
-#define CBAUD	0010017
-#define  B0	0000000		/* hang up */
-#define  B50	0000001
-#define  B75	0000002
-#define  B110	0000003
-#define  B134	0000004
-#define  B150	0000005
-#define  B200	0000006
-#define  B300	0000007
-#define  B600	0000010
-#define  B1200	0000011
-#define  B1800	0000012
-#define  B2400	0000013
-#define  B4800	0000014
-#define  B9600	0000015
-#define  B19200	0000016
-#define  B38400	0000017
-#define EXTA	B19200
-#define EXTB	B38400
-#define CSIZE	0000060
-#define   CS5	0000000
-#define   CS6	0000020
-#define   CS7	0000040
-#define   CS8	0000060
-#define CSTOPB	0000100
-#define CREAD	0000200
-#define PARENB	0000400
-#define PARODD	0001000
-#define HUPCL	0002000
-#define CLOCAL	0004000
-#define CBAUDEX 0010000
-#define	   BOTHER 0010000	/* non standard rate */
-#define    B57600 0010001
-#define   B115200 0010002
-#define   B230400 0010003
-#define   B460800 0010004
-#define   B500000 0010005
-#define   B576000 0010006
-#define   B921600 0010007
-#define  B1000000 0010010
-#define  B1152000 0010011
-#define  B1500000 0010012
-#define  B2000000 0010013
-#define  B2500000 0010014
-#define  B3000000 0010015
-#define  B3500000 0010016
-#define  B4000000 0010017
-#define CIBAUD	  002003600000	/* input baud rate */
-#define CMSPAR	  010000000000	/* mark or space (stick) parity */
-#define CRTSCTS	  020000000000	/* flow control */
-
-#define IBSHIFT	  16		/* Shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-#define ISIG	0000001
-#define ICANON	0000002
-#define XCASE	0000004
-#define ECHO	0000010
-#define ECHOE	0000020
-#define ECHOK	0000040
-#define ECHONL	0000100
-#define NOFLSH	0000200
-#define TOSTOP	0000400
-#define ECHOCTL	0001000
-#define ECHOPRT	0002000
-#define ECHOKE	0004000
-#define FLUSHO	0010000
-#define PENDIN	0040000
-#define IEXTEN	0100000
-
-/* tcflow() and TCXONC use these */
-#define	TCOOFF		0
-#define	TCOON		1
-#define	TCIOFF		2
-#define	TCION		3
-
-/* tcflush() and TCFLSH use these */
-#define	TCIFLUSH	0
-#define	TCOFLUSH	1
-#define	TCIOFLUSH	2
-
-/* tcsetattr uses these */
-#define	TCSANOW		0
-#define	TCSADRAIN	1
-#define	TCSAFLUSH	2
-
-#endif /* _ASM_X86_TERMBITS_H */
+#include <asm-generic/termbits.h>
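These constants are user-visible ABI, which is why the switch to the asm-generic header must keep every value bit-identical. The termios2/BOTHER pair, for instance, is how userspace asks for a baud rate outside the Bxxx table; a small sketch (assumes a Linux toolchain exposing TCGETS2/TCSETS2 via <asm/ioctls.h>):

	#include <sys/ioctl.h>
	#include <asm/ioctls.h>		/* TCGETS2, TCSETS2 */
	#include <asm/termbits.h>	/* struct termios2, BOTHER, CBAUD */

	/* Ask for an arbitrary rate, e.g. 250000 baud, on an open tty fd. */
	static int set_custom_baud(int fd, int rate)
	{
		struct termios2 tio;

		if (ioctl(fd, TCGETS2, &tio) < 0)
			return -1;
		tio.c_cflag &= ~CBAUD;
		tio.c_cflag |= BOTHER;	/* rate taken from c_ispeed/c_ospeed */
		tio.c_ispeed = rate;
		tio.c_ospeed = rate;
		return ioctl(fd, TCSETS2, &tio);
	}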
@@ -1,114 +1 @@
-#ifndef _ASM_X86_TERMIOS_H
-#define _ASM_X86_TERMIOS_H
-
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-
-struct winsize {
-	unsigned short ws_row;
-	unsigned short ws_col;
-	unsigned short ws_xpixel;
-	unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
-	unsigned short c_iflag;		/* input mode flags */
-	unsigned short c_oflag;		/* output mode flags */
-	unsigned short c_cflag;		/* control mode flags */
-	unsigned short c_lflag;		/* local mode flags */
-	unsigned char c_line;		/* line discipline */
-	unsigned char c_cc[NCC];	/* control characters */
-};
-
-/* modem lines */
-#define TIOCM_LE	0x001
-#define TIOCM_DTR	0x002
-#define TIOCM_RTS	0x004
-#define TIOCM_ST	0x008
-#define TIOCM_SR	0x010
-#define TIOCM_CTS	0x020
-#define TIOCM_CAR	0x040
-#define TIOCM_RNG	0x080
-#define TIOCM_DSR	0x100
-#define TIOCM_CD	TIOCM_CAR
-#define TIOCM_RI	TIOCM_RNG
-#define TIOCM_OUT1	0x2000
-#define TIOCM_OUT2	0x4000
-#define TIOCM_LOOP	0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-#ifdef __KERNEL__
-
-#include <asm/uaccess.h>
-
-/*	intr=^C		quit=^\		erase=del	kill=^U
-	eof=^D		vtime=\0	vmin=\1		sxtc=\0
-	start=^Q	stop=^S		susp=^Z		eol=\0
-	reprint=^R	discard=^U	werase=^W	lnext=^V
-	eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
-	unsigned short __tmp; \
-	get_user(__tmp,&(termio)->x); \
-	*(unsigned short *) &(termios)->x = __tmp; \
-}
-
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
-						struct termio __user *termio)
-{
-	SET_LOW_TERMIOS_BITS(termios, termio, c_iflag);
-	SET_LOW_TERMIOS_BITS(termios, termio, c_oflag);
-	SET_LOW_TERMIOS_BITS(termios, termio, c_cflag);
-	SET_LOW_TERMIOS_BITS(termios, termio, c_lflag);
-	get_user(termios->c_line, &termio->c_line);
-	return copy_from_user(termios->c_cc, termio->c_cc, NCC);
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
-						struct ktermios *termios)
-{
-	put_user((termios)->c_iflag, &(termio)->c_iflag);
-	put_user((termios)->c_oflag, &(termio)->c_oflag);
-	put_user((termios)->c_cflag, &(termio)->c_cflag);
-	put_user((termios)->c_lflag, &(termio)->c_lflag);
-	put_user((termios)->c_line, &(termio)->c_line);
-	return copy_to_user((termio)->c_cc, (termios)->c_cc, NCC);
-}
-
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
-						 struct termios2 __user *u)
-{
-	return copy_from_user(k, u, sizeof(struct termios2));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
-						 struct ktermios *k)
-{
-	return copy_to_user(u, k, sizeof(struct termios2));
-}
-
-static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
-						   struct termios __user *u)
-{
-	return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
-						   struct ktermios *k)
-{
-	return copy_to_user(u, k, sizeof(struct termios));
-}
-
-#endif	/* __KERNEL__ */
-
-#endif /* _ASM_X86_TERMIOS_H */
+#include <asm-generic/termios.h>
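The TIOCM_* modem-line bits likewise carry over unchanged into the generic header. Typical userspace use is a status poll; a minimal hedged example (TIOCMGET and the TIOCM_* constants come via <sys/ioctl.h> on glibc):

	#include <stdio.h>
	#include <sys/ioctl.h>

	/* Report CTS and carrier-detect on an already-open tty fd. */
	static void show_modem_lines(int fd)
	{
		int status;

		if (ioctl(fd, TIOCMGET, &status) == 0)
			printf("CTS=%d CD=%d\n",
			       !!(status & TIOCM_CTS), !!(status & TIOCM_CAR));
	}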
@@ -214,7 +214,7 @@ DECLARE_PER_CPU(unsigned long, kernel_stack);
 static inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
-	ti = (void *)(percpu_read(kernel_stack) +
+	ti = (void *)(percpu_read_stable(kernel_stack) +
 		      KERNEL_STACK_OFFSET - THREAD_SIZE);
 	return ti;
 }
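percpu_read_stable() lets the compiler treat the per-cpu kernel_stack value as invariant within a function, so repeated current_thread_info() calls can be folded into one load. The address arithmetic itself is just "top of the stack area, minus THREAD_SIZE, plus a small reserved offset"; an illustrative sketch with assumed constants:

	/* Sketch only: the real KERNEL_STACK_OFFSET/THREAD_SIZE come from
	 * the kernel headers; the values below are assumptions. */
	#define SKETCH_THREAD_SIZE	8192UL	/* assumed 8 KB stacks    */
	#define SKETCH_STACK_OFFSET	(5 * 8)	/* assumed reserved slack */

	static inline void *sketch_thread_info(unsigned long stack_top)
	{
		return (void *)(stack_top + SKETCH_STACK_OFFSET - SKETCH_THREAD_SIZE);
	}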
@@ -129,25 +129,34 @@ extern unsigned long node_remap_size[];
 #endif
 
 /* sched_domains SD_NODE_INIT for NUMA machines */
-#define SD_NODE_INIT (struct sched_domain) {		\
-	.min_interval		= 8,			\
-	.max_interval		= 32,			\
-	.busy_factor		= 32,			\
-	.imbalance_pct		= 125,			\
-	.cache_nice_tries	= SD_CACHE_NICE_TRIES,	\
-	.busy_idx		= 3,			\
-	.idle_idx		= SD_IDLE_IDX,		\
-	.newidle_idx		= SD_NEWIDLE_IDX,	\
-	.wake_idx		= 1,			\
-	.forkexec_idx		= SD_FORKEXEC_IDX,	\
-	.flags			= SD_LOAD_BALANCE	\
-				| SD_BALANCE_EXEC	\
-				| SD_BALANCE_FORK	\
-				| SD_WAKE_AFFINE	\
-				| SD_WAKE_BALANCE	\
-				| SD_SERIALIZE,		\
-	.last_balance		= jiffies,		\
-	.balance_interval	= 1,			\
+#define SD_NODE_INIT (struct sched_domain) {				\
+	.min_interval		= 8,					\
+	.max_interval		= 32,					\
+	.busy_factor		= 32,					\
+	.imbalance_pct		= 125,					\
+	.cache_nice_tries	= SD_CACHE_NICE_TRIES,			\
+	.busy_idx		= 3,					\
+	.idle_idx		= SD_IDLE_IDX,				\
+	.newidle_idx		= SD_NEWIDLE_IDX,			\
+	.wake_idx		= 1,					\
+	.forkexec_idx		= SD_FORKEXEC_IDX,			\
+									\
+	.flags			= 1*SD_LOAD_BALANCE			\
+				| 1*SD_BALANCE_NEWIDLE			\
+				| 1*SD_BALANCE_EXEC			\
+				| 1*SD_BALANCE_FORK			\
+				| 0*SD_WAKE_IDLE			\
+				| 1*SD_WAKE_AFFINE			\
+				| 1*SD_WAKE_BALANCE			\
+				| 0*SD_SHARE_CPUPOWER			\
+				| 0*SD_POWERSAVINGS_BALANCE		\
+				| 0*SD_SHARE_PKG_RESOURCES		\
+				| 1*SD_SERIALIZE			\
+				| 1*SD_WAKE_IDLE_FAR			\
+				| 0*SD_PREFER_SIBLING			\
+				,					\
+	.last_balance		= jiffies,				\
+	.balance_interval	= 1,					\
 }
 
 #ifdef CONFIG_X86_64_ACPI_NUMA
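The new 1*/0* initializer style keeps every flag the domain could take visible in place while still folding to a compile-time constant; toggling a policy becomes a one-digit edit. A self-contained illustration of the idiom (flag values invented for the example):

	/* Hypothetical flags, for illustration only. */
	#define F_A (1 << 0)
	#define F_B (1 << 1)
	#define F_C (1 << 2)

	/* All flags stay listed; the 0* terms vanish at compile time. */
	#define POLICY_FLAGS	( 1*F_A		\
				| 0*F_B		\
				| 1*F_C )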
@@ -81,9 +81,7 @@ extern int panic_on_unrecovered_nmi;
 
 void math_error(void __user *);
 void math_emulate(struct math_emu_info *);
-#ifdef CONFIG_X86_32
-unsigned long patch_espfix_desc(unsigned long, unsigned long);
-#else
+#ifndef CONFIG_X86_32
 asmlinkage void smp_thermal_interrupt(void);
 asmlinkage void mce_threshold_interrupt(void);
 #endif
@@ -1,19 +1,11 @@
 #ifndef _ASM_X86_TYPES_H
 #define _ASM_X86_TYPES_H
 
-#include <asm-generic/int-ll64.h>
+#define dma_addr_t dma_addr_t
 
-#ifndef __ASSEMBLY__
+#include <asm-generic/types.h>
 
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
 #ifdef __KERNEL__
-
 #ifndef __ASSEMBLY__
 
 typedef u64 dma64_addr_t;
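The self-referential "#define dma_addr_t dma_addr_t" is the usual handshake with asm-generic headers: the generic header only supplies its fallback typedef when the macro is undefined, so the arch keeps control of the type's width. The pattern, sketched with invented names:

	/* arch header: claim the type before including the generic one. */
	#define sketch_addr_t sketch_addr_t
	typedef unsigned long long sketch_addr_t;	/* arch-chosen width */

	/* generic header counterpart: */
	#ifndef sketch_addr_t
	typedef unsigned int sketch_addr_t;		/* generic fallback */
	#endif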
@@ -7,12 +7,6 @@
  * sigcontext struct (uc_mcontext).
  */
 
-struct ucontext {
-	unsigned long	  uc_flags;
-	struct ucontext  *uc_link;
-	stack_t		  uc_stack;
-	struct sigcontext uc_mcontext;
-	sigset_t	  uc_sigmask;	/* mask last for extensibility */
-};
+#include <asm-generic/ucontext.h>
 
 #endif /* _ASM_X86_UCONTEXT_H */
@@ -55,6 +55,7 @@
 #define SECONDARY_EXEC_ENABLE_EPT		0x00000002
 #define SECONDARY_EXEC_ENABLE_VPID		0x00000020
 #define SECONDARY_EXEC_WBINVD_EXITING		0x00000040
+#define SECONDARY_EXEC_UNRESTRICTED_GUEST	0x00000080
 
 
 #define PIN_BASED_EXT_INTR_MASK			0x00000001
@@ -351,9 +352,16 @@ enum vmcs_field {
 #define VMX_EPT_EXTENT_INDIVIDUAL_ADDR		0
 #define VMX_EPT_EXTENT_CONTEXT			1
 #define VMX_EPT_EXTENT_GLOBAL			2
+
+#define VMX_EPT_EXECUTE_ONLY_BIT		(1ull)
+#define VMX_EPT_PAGE_WALK_4_BIT			(1ull << 6)
+#define VMX_EPTP_UC_BIT				(1ull << 8)
+#define VMX_EPTP_WB_BIT				(1ull << 14)
+#define VMX_EPT_2MB_PAGE_BIT			(1ull << 16)
 #define VMX_EPT_EXTENT_INDIVIDUAL_BIT		(1ull << 24)
 #define VMX_EPT_EXTENT_CONTEXT_BIT		(1ull << 25)
 #define VMX_EPT_EXTENT_GLOBAL_BIT		(1ull << 26)
 
 #define VMX_EPT_DEFAULT_GAW			3
 #define VMX_EPT_MAX_GAW				0x4
 #define VMX_EPT_MT_EPTE_SHIFT			3
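The new VMX_EPT*/VMX_EPTP* bits mirror the capability word a VMM reads from MSR_IA32_VMX_EPT_VPID_CAP before turning on the matching EPT features. A hedged sketch of such a probe (the MSR-read helper is assumed, e.g. backed by /dev/cpu/*/msr or a kernel rdmsrl()):

	#include <stdint.h>

	#define MSR_IA32_VMX_EPT_VPID_CAP	0x48c
	#define VMX_EPT_2MB_PAGE_BIT		(1ull << 16)	/* as above */

	/* Assumed helper: returns the 64-bit value of an MSR. */
	extern uint64_t sketch_rdmsr(uint32_t msr);

	/* Enable 2 MB EPT mappings only when the CPU advertises them. */
	static int ept_supports_2mb_pages(void)
	{
		return (sketch_rdmsr(MSR_IA32_VMX_EPT_VPID_CAP)
			& VMX_EPT_2MB_PAGE_BIT) != 0;
	}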