Merge branch 'linus' into release
Conflicts:
	arch/x86/kernel/acpi/sleep.c

Signed-off-by: Len Brown <len.brown@intel.com>
@@ -29,6 +29,7 @@
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mpspec.h>
+#include <asm/trampoline.h>
 
 #define COMPILER_DEPENDENT_INT64   long long
 #define COMPILER_DEPENDENT_UINT64  unsigned long long
@@ -116,7 +117,8 @@ static inline void acpi_disable_pci(void)
 /* Low-level suspend routine. */
 extern int acpi_suspend_lowlevel(void);
 
-extern unsigned long acpi_wakeup_address;
+extern const unsigned char acpi_wakeup_code[];
+#define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code)))
 
 /* early initialization routine */
 extern void acpi_reserve_wakeup_memory(void);
@@ -185,15 +187,7 @@ struct bootnode;
 
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
-extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
-			   unsigned long end);
-extern int acpi_scan_nodes(unsigned long start, unsigned long end);
-#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-
-#ifdef CONFIG_NUMA_EMU
-extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
-			    int num_nodes);
-#endif
+extern int x86_acpi_numa_init(void);
 #endif /* CONFIG_ACPI_NUMA */
 
 #define acpi_unlazy_tlb(x)	leave_mm(x)
@@ -9,23 +9,20 @@ struct amd_nb_bus_dev_range {
 	u8 dev_limit;
 };
 
-extern struct pci_device_id amd_nb_misc_ids[];
+extern const struct pci_device_id amd_nb_misc_ids[];
 extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
 struct bootnode;
 
-extern int early_is_amd_nb(u32 value);
+extern bool early_is_amd_nb(u32 value);
 extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
-extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
-extern int amd_scan_nodes(void);
-
-#ifdef CONFIG_NUMA_EMU
-extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
-extern void amd_get_nodes(struct bootnode *nodes);
-#endif
+extern int amd_numa_init(void);
+extern int amd_get_subcaches(int);
+extern int amd_set_subcaches(int, int);
 
 struct amd_northbridge {
 	struct pci_dev *misc;
+	struct pci_dev *link;
 };
 
 struct amd_northbridge_info {
@@ -35,17 +32,18 @@ struct amd_northbridge_info {
 };
 extern struct amd_northbridge_info amd_northbridges;
 
-#define AMD_NB_GART			0x1
-#define AMD_NB_L3_INDEX_DISABLE		0x2
+#define AMD_NB_GART			BIT(0)
+#define AMD_NB_L3_INDEX_DISABLE		BIT(1)
+#define AMD_NB_L3_PARTITIONING		BIT(2)
 
 #ifdef CONFIG_AMD_NB
 
-static inline int amd_nb_num(void)
+static inline u16 amd_nb_num(void)
 {
 	return amd_northbridges.num;
 }
 
-static inline int amd_nb_has_feature(int feature)
+static inline bool amd_nb_has_feature(unsigned feature)
 {
 	return ((amd_northbridges.flags & feature) == feature);
 }
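Note: a minimal usage sketch (not part of this patch) of the reworked flag API
above. BIT(n) expands to (1UL << (n)), and amd_nb_has_feature() returns true
only when all of the requested bits are set:

	/* hypothetical caller */
	if (amd_nb_has_feature(AMD_NB_GART))
		amd_flush_garts();

	/* both L3 features must be present for this branch to be taken */
	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE | AMD_NB_L3_PARTITIONING))
		setup_l3_partitioning();	/* hypothetical helper */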
@@ -220,7 +220,6 @@ extern void enable_IR_x2apic(void);
 
 extern int get_physical_broadcast(void);
 
-extern void apic_disable(void);
 extern int lapic_get_maxlvt(void);
 extern void clear_local_APIC(void);
 extern void connect_bsp_APIC(void);
@@ -228,7 +227,6 @@ extern void disconnect_bsp_APIC(int virt_wire_setup);
 extern void disable_local_APIC(void);
 extern void lapic_shutdown(void);
 extern int verify_local_APIC(void);
-extern void cache_APIC_registers(void);
 extern void sync_Arb_IDs(void);
 extern void init_bsp_APIC(void);
 extern void setup_local_APIC(void);
@@ -239,8 +237,7 @@ void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
 extern void setup_secondary_APIC_clock(void);
 extern int APIC_init_uniprocessor(void);
-extern void enable_NMI_through_LVT0(void);
-extern int apic_force_enable(void);
+extern int apic_force_enable(unsigned long addr);
 
 /*
  * On 32bit this is mach-xxx local
@@ -261,7 +258,6 @@ static inline void lapic_shutdown(void) { }
 #define local_apic_timer_c2_ok		1
 static inline void init_apic_mappings(void) { }
 static inline void disable_local_APIC(void) { }
-static inline void apic_disable(void) { }
 # define setup_boot_APIC_clock x86_init_noop
 # define setup_secondary_APIC_clock x86_init_noop
 #endif /* !CONFIG_X86_LOCAL_APIC */
@@ -307,8 +303,6 @@ struct apic {
 
 	void (*setup_apic_routing)(void);
 	int (*multi_timer_check)(int apic, int irq);
-	int (*apicid_to_node)(int logical_apicid);
-	int (*cpu_to_logical_apicid)(int cpu);
 	int (*cpu_present_to_apicid)(int mps_cpu);
 	void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
 	void (*setup_portio_remap)(void);
@@ -356,6 +350,23 @@ struct apic {
 	void (*icr_write)(u32 low, u32 high);
 	void (*wait_icr_idle)(void);
 	u32 (*safe_wait_icr_idle)(void);
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Called very early during boot from get_smp_config().  It should
+	 * return the logical apicid.  x86_[bios]_cpu_to_apicid is
+	 * initialized before this function is called.
+	 *
+	 * If logical apicid can't be determined that early, the function
+	 * may return BAD_APICID.  Logical apicid will be configured after
+	 * init_apic_ldr() while bringing up CPUs.  Note that NUMA affinity
+	 * won't be applied properly during early boot in this case.
+	 */
+	int (*x86_32_early_logical_apicid)(int cpu);
+
+	/* determine CPU -> NUMA node mapping */
+	int (*x86_32_numa_cpu_node)(int cpu);
+#endif
 };
 
 /*
@@ -503,6 +514,11 @@ extern struct apic apic_noop;
 
 extern struct apic apic_default;
 
+static inline int noop_x86_32_early_logical_apicid(int cpu)
+{
+	return BAD_APICID;
+}
+
 /*
  * Set up the logical destination ID.
  *
@@ -522,7 +538,7 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
 	return cpuid_apic >> index_msb;
 }
 
-extern int default_apicid_to_node(int logical_apicid);
+extern int default_x86_32_numa_cpu_node(int cpu);
 
 #endif
@@ -558,12 +574,6 @@ static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_ma
 	*retmap = *phys_map;
 }
 
-/* Mapping from cpu number to logical apicid */
-static inline int default_cpu_to_logical_apicid(int cpu)
-{
-	return 1 << cpu;
-}
-
 static inline int __default_cpu_present_to_apicid(int mps_cpu)
 {
 	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
@@ -596,8 +606,4 @@ extern int default_check_phys_apicid_present(int phys_apicid);
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#ifdef CONFIG_X86_32
-extern u8 cpu_2_logical_apicid[NR_CPUS];
-#endif
-
 #endif /* _ASM_X86_APIC_H */
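Note: the comment block added to struct apic above spells out the contract for
the new 32-bit callbacks. A rough sketch of an implementation, assuming a
flat-style driver (hypothetical; real apic drivers differ):

	/* return the logical apicid recorded for this cpu, or give up */
	static int example_x86_32_early_logical_apicid(int cpu)
	{
		/* flat mode uses one bit per cpu; BAD_APICID if unknown */
		return cpu < 8 ? 1 << cpu : BAD_APICID;
	}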
@@ -426,4 +426,16 @@ struct local_apic {
 #else
 #define BAD_APICID 0xFFFFu
 #endif
+
+enum ioapic_irq_destination_types {
+	dest_Fixed		= 0,
+	dest_LowestPrio		= 1,
+	dest_SMI		= 2,
+	dest__reserved_1	= 3,
+	dest_NMI		= 4,
+	dest_INIT		= 5,
+	dest__reserved_2	= 6,
+	dest_ExtINT		= 7
+};
+
 #endif /* _ASM_X86_APICDEF_H */
@@ -12,6 +12,7 @@
 /* setup data types */
 #define SETUP_NONE			0
 #define SETUP_E820_EXT			1
+#define SETUP_DTB			2
 
 /* extensible setup data list node */
 struct setup_data {
@@ -71,7 +71,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
  * Read/Write  : ReadOnly, ReadWrite
  * Presence    : NotPresent
  *
- * Within a catagory, the attributes are mutually exclusive.
+ * Within a category, the attributes are mutually exclusive.
  *
  * The implementation of this API will take care of various aspects that
  * are associated with changing such attributes, such as:
@@ -160,6 +160,7 @@
 #define X86_FEATURE_NODEID_MSR	(6*32+19) /* NodeId MSR */
 #define X86_FEATURE_TBM		(6*32+21) /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT	(6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE	(6*32+23) /* core performance counter extensions */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
@@ -279,6 +280,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
+#define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg		1
@@ -151,6 +151,7 @@
 #define DMA_AUTOINIT		0x10
 
 
+#ifdef CONFIG_ISA_DMA_API
 extern spinlock_t  dma_spin_lock;
 
 static inline unsigned long claim_dma_lock(void)
@@ -164,6 +165,7 @@ static inline void release_dma_lock(unsigned long flags)
 {
 	spin_unlock_irqrestore(&dma_spin_lock, flags);
 }
+#endif /* CONFIG_ISA_DMA_API */
 
 /* enable/disable a specific DMA channel */
 static inline void enable_dma(unsigned int dmanr)
@@ -303,9 +305,11 @@ static inline int get_dma_residue(unsigned int dmanr)
 }
 
 
-/* These are in kernel/dma.c: */
+/* These are in kernel/dma.c because x86 uses CONFIG_GENERIC_ISA_DMA */
+#ifdef CONFIG_ISA_DMA_API
 extern int request_dma(unsigned int dmanr, const char *device_id);
 extern void free_dma(unsigned int dmanr);
+#endif
 
 /* From PCI */
 
@@ -96,7 +96,7 @@ extern void e820_setup_gap(void);
 extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
 		unsigned long start_addr, unsigned long long end_addr);
 struct setup_data;
-extern void parse_e820_ext(struct setup_data *data, unsigned long pa_data);
+extern void parse_e820_ext(struct setup_data *data);
 
 #if defined(CONFIG_X86_64) || \
 	(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
@@ -16,10 +16,13 @@ BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
 
-.irpc idx, "01234567"
+.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
+	16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+.if NUM_INVALIDATE_TLB_VECTORS > \idx
 BUILD_INTERRUPT3(invalidate_interrupt\idx,
 		 (INVALIDATE_TLB_VECTOR_START)+\idx,
 		 smp_invalidate_interrupt)
+.endif
 .endr
 #endif
 
@@ -7,14 +7,12 @@
 	frame pointer later */
 #ifdef CONFIG_FRAME_POINTER
 	.macro FRAME
-	pushl %ebp
-	CFI_ADJUST_CFA_OFFSET 4
+	pushl_cfi %ebp
 	CFI_REL_OFFSET ebp,0
 	movl %esp,%ebp
 	.endm
 	.macro ENDFRAME
-	popl %ebp
-	CFI_ADJUST_CFA_OFFSET -4
+	popl_cfi %ebp
 	CFI_RESTORE ebp
 	.endm
 #else
@@ -37,7 +37,7 @@
 		     "+m" (*uaddr), "=&r" (tem)		\
 		     : "r" (oparg), "i" (-EFAULT), "1" (0))
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
@@ -109,9 +109,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
-						int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
 {
+	int ret = 0;
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
 	/* Real i386 machines have no cmpxchg instruction */
@@ -119,21 +120,22 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
 		return -ENOSYS;
 #endif
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
+	asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
 		     "2:\t.section .fixup, \"ax\"\n"
-		     "3:\tmov     %2, %0\n"
+		     "3:\tmov     %3, %0\n"
 		     "\tjmp     2b\n"
 		     "\t.previous\n"
 		     _ASM_EXTABLE(1b, 3b)
-		     : "=a" (oldval), "+m" (*uaddr)
-		     : "i" (-EFAULT), "r" (newval), "0" (oldval)
+		     : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+		     : "i" (-EFAULT), "r" (newval), "1" (oldval)
 		     : "memory"
 	);
 
-	return oldval;
+	*uval = oldval;
+	return ret;
 }
 
 #endif
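Note on the interface change above: the old futex_atomic_cmpxchg_inatomic()
returned the old futex value and overloaded the same channel with -EFAULT,
which is ambiguous when the futex legitimately holds that bit pattern. The new
form separates the fault code from the value. A hypothetical caller:

	u32 curval;
	int ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, expected, newval);

	if (ret)			/* -EFAULT: user page not writable */
		return ret;
	if (curval != expected)		/* lost the race, but curval is valid */
		goto retry;		/* hypothetical label */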
@@ -45,6 +45,30 @@ extern void invalidate_interrupt4(void);
 extern void invalidate_interrupt5(void);
 extern void invalidate_interrupt6(void);
 extern void invalidate_interrupt7(void);
+extern void invalidate_interrupt8(void);
+extern void invalidate_interrupt9(void);
+extern void invalidate_interrupt10(void);
+extern void invalidate_interrupt11(void);
+extern void invalidate_interrupt12(void);
+extern void invalidate_interrupt13(void);
+extern void invalidate_interrupt14(void);
+extern void invalidate_interrupt15(void);
+extern void invalidate_interrupt16(void);
+extern void invalidate_interrupt17(void);
+extern void invalidate_interrupt18(void);
+extern void invalidate_interrupt19(void);
+extern void invalidate_interrupt20(void);
+extern void invalidate_interrupt21(void);
+extern void invalidate_interrupt22(void);
+extern void invalidate_interrupt23(void);
+extern void invalidate_interrupt24(void);
+extern void invalidate_interrupt25(void);
+extern void invalidate_interrupt26(void);
+extern void invalidate_interrupt27(void);
+extern void invalidate_interrupt28(void);
+extern void invalidate_interrupt29(void);
+extern void invalidate_interrupt30(void);
+extern void invalidate_interrupt31(void);
 
 extern void irq_move_cleanup_interrupt(void);
 extern void reboot_interrupt(void);
@@ -11,8 +11,8 @@ kernel_physical_mapping_init(unsigned long start,
 					     unsigned long page_size_mask);
 
 
-extern unsigned long __initdata e820_table_start;
-extern unsigned long __meminitdata e820_table_end;
-extern unsigned long __meminitdata e820_table_top;
+extern unsigned long __initdata pgt_buf_start;
+extern unsigned long __meminitdata pgt_buf_end;
+extern unsigned long __meminitdata pgt_buf_top;
 
 #endif /* _ASM_X86_INIT_32_H */
@@ -63,17 +63,6 @@ union IO_APIC_reg_03 {
 	} __attribute__ ((packed)) bits;
 };
 
-enum ioapic_irq_destination_types {
-	dest_Fixed = 0,
-	dest_LowestPrio = 1,
-	dest_SMI = 2,
-	dest__reserved_1 = 3,
-	dest_NMI = 4,
-	dest_INIT = 5,
-	dest__reserved_2 = 6,
-	dest_ExtINT = 7
-};
-
 struct IO_APIC_route_entry {
 	__u32 vector		:  8,
 		delivery_mode	:  3,	/* 000: FIXED
@@ -106,6 +95,10 @@ struct IR_IO_APIC_route_entry {
 		index		: 15;
 } __attribute__ ((packed));
 
+#define IOAPIC_AUTO     -1
+#define IOAPIC_EDGE     0
+#define IOAPIC_LEVEL    1
+
 #ifdef CONFIG_X86_IO_APIC
 
 /*
@@ -150,11 +143,6 @@ extern int timer_through_8259;
 #define io_apic_assign_pci_irqs \
 	(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
 
-extern u8 io_apic_unique_id(u8 id);
-extern int io_apic_get_unique_id(int ioapic, int apic_id);
-extern int io_apic_get_version(int ioapic);
-extern int io_apic_get_redir_entries(int ioapic);
-
 struct io_apic_irq_attr;
 extern int io_apic_set_pci_routing(struct device *dev, int irq,
 		 struct io_apic_irq_attr *irq_attr);
@@ -162,6 +150,8 @@ void setup_IO_APIC_irq_extra(u32 gsi);
 extern void ioapic_and_gsi_init(void);
 extern void ioapic_insert_resources(void);
 
+int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr);
+
 extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
 extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
 extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
@@ -186,6 +176,8 @@ extern void __init pre_init_apic_IRQ0(void);
 
 extern void mp_save_irq(struct mpc_intsrc *m);
 
+extern void disable_ioapic_support(void);
+
 #else /* !CONFIG_X86_IO_APIC */
 
 #define io_apic_assign_pci_irqs 0
@@ -199,6 +191,26 @@ static inline int mp_find_ioapic(u32 gsi) { return 0; }
 struct io_apic_irq_attr;
 static inline int io_apic_set_pci_routing(struct device *dev, int irq,
 		 struct io_apic_irq_attr *irq_attr) { return 0; }
+
+static inline struct IO_APIC_route_entry **alloc_ioapic_entries(void)
+{
+	return NULL;
+}
+
+static inline void free_ioapic_entries(struct IO_APIC_route_entry **ent) { }
+static inline int save_IO_APIC_setup(struct IO_APIC_route_entry **ent)
+{
+	return -ENOMEM;
+}
+
+static inline void mask_IO_APIC_setup(struct IO_APIC_route_entry **ent) { }
+static inline int restore_IO_APIC_setup(struct IO_APIC_route_entry **ent)
+{
+	return -ENOMEM;
+}
+
+static inline void mp_save_irq(struct mpc_intsrc *m) { };
+static inline void disable_ioapic_support(void) { }
 #endif
 
 #endif /* _ASM_X86_IO_APIC_H */
@@ -123,10 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
 						 int vector);
 extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
 							 int vector);
-extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
-							 int vector);
-extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
-							 int vector);
 
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
@@ -150,6 +146,10 @@ static inline void __default_local_send_IPI_all(int vector)
 }
 
 #ifdef CONFIG_X86_32
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+							 int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+							 int vector);
 extern void default_send_IPI_mask_logical(const struct cpumask *mask,
 						 int vector);
 extern void default_send_IPI_allbutself(int vector);
@@ -10,9 +10,6 @@
 #include <asm/apicdef.h>
 #include <asm/irq_vectors.h>
 
-/* Even though we don't support this, supply it to appease OF */
-static inline void irq_dispose_mapping(unsigned int virq) { }
-
 static inline int irq_canonicalize(int irq)
 {
 	return ((irq == 2) ? 9 : irq);
arch/x86/include/asm/irq_controller.h (new file, 12 lines)
@@ -0,0 +1,12 @@
+#ifndef __IRQ_CONTROLLER__
+#define __IRQ_CONTROLLER__
+
+struct irq_domain {
+	int (*xlate)(struct irq_domain *h, const u32 *intspec, u32 intsize,
+			u32 *out_hwirq, u32 *out_type);
+	void *priv;
+	struct device_node *controller;
+	struct list_head l;
+};
+
+#endif
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_IRQ_VECTORS_H
 #define _ASM_X86_IRQ_VECTORS_H
 
+#include <linux/threads.h>
 /*
  * Linux IRQ vector layout.
  *
@@ -16,8 +17,8 @@
  *  Vectors   0 ...  31 : system traps and exceptions - hardcoded events
  *  Vectors  32 ... 127 : device interrupts
  *  Vector  128         : legacy int80 syscall interface
- *  Vectors 129 ... 237 : device interrupts
- *  Vectors 238 ... 255 : special interrupts
+ *  Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 : device interrupts
+ *  Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts
  *
  *  64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
  *
@@ -96,10 +97,25 @@
 #define THRESHOLD_APIC_VECTOR		0xf9
 #define REBOOT_VECTOR			0xf8
 
-/* f0-f7 used for spreading out TLB flushes: */
-#define INVALIDATE_TLB_VECTOR_END	0xf7
-#define INVALIDATE_TLB_VECTOR_START	0xf0
-#define NUM_INVALIDATE_TLB_VECTORS	8
-
+/*
+ * Generic system vector for platform specific use
+ */
+#define X86_PLATFORM_IPI_VECTOR		0xf7
+
+/*
+ * IRQ work vector:
+ */
+#define IRQ_WORK_VECTOR			0xf6
+
+#define UV_BAU_MESSAGE			0xf5
+
+/*
+ * Self IPI vector for machine checks
+ */
+#define MCE_SELF_VECTOR			0xf4
+
+/* Xen vector callback to receive events in a HVM domain */
+#define XEN_HVM_EVTCHN_CALLBACK		0xf3
+
 /*
  * Local APIC timer IRQ vector is on a different priority level,
@@ -108,25 +124,16 @@
  */
 #define LOCAL_TIMER_VECTOR		0xef
 
-/*
- * Generic system vector for platform specific use
- */
-#define X86_PLATFORM_IPI_VECTOR		0xed
-
-/*
- * IRQ work vector:
- */
-#define IRQ_WORK_VECTOR			0xec
-
-#define UV_BAU_MESSAGE			0xea
-
-/*
- * Self IPI vector for machine checks
- */
-#define MCE_SELF_VECTOR			0xeb
-
-/* Xen vector callback to receive events in a HVM domain */
-#define XEN_HVM_EVTCHN_CALLBACK		0xe9
+/* up to 32 vectors used for spreading out TLB flushes: */
+#if NR_CPUS <= 32
+# define NUM_INVALIDATE_TLB_VECTORS	(NR_CPUS)
+#else
+# define NUM_INVALIDATE_TLB_VECTORS	(32)
+#endif
+
+#define INVALIDATE_TLB_VECTOR_END	(0xee)
+#define INVALIDATE_TLB_VECTOR_START	\
+	(INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1)
 
 #define NR_VECTORS			 256
 
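Worked example of the relocated TLB-flush block above, assuming NR_CPUS = 8:

	/*
	 * NUM_INVALIDATE_TLB_VECTORS  = 8	(since NR_CPUS <= 32)
	 * INVALIDATE_TLB_VECTOR_END   = 0xee
	 * INVALIDATE_TLB_VECTOR_START = 0xee - 8 + 1 = 0xe7
	 *
	 * so vectors 0xe7..0xee carry TLB-flush IPIs, while the fixed
	 * vectors (0xef LOCAL_TIMER_VECTOR and everything above it)
	 * keep their positions.
	 */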
@@ -13,7 +13,6 @@ enum die_val {
 	DIE_PANIC,
 	DIE_NMI,
 	DIE_DIE,
-	DIE_NMIWATCHDOG,
 	DIE_KERNELDEBUG,
 	DIE_TRAP,
 	DIE_GPF,
@@ -27,7 +26,7 @@ extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_registers(struct pt_regs *regs);
 extern void show_trace(struct task_struct *t, struct pt_regs *regs,
-		       unsigned long *sp);
+		       unsigned long *sp, unsigned long bp);
 extern void __show_regs(struct pt_regs *regs, int all);
 extern void show_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
@@ -142,9 +142,9 @@ struct x86_emulate_ops {
 	int (*pio_out_emulated)(int size, unsigned short port, const void *val,
 				unsigned int count, struct kvm_vcpu *vcpu);
 
-	bool (*get_cached_descriptor)(struct desc_struct *desc,
+	bool (*get_cached_descriptor)(struct desc_struct *desc, u32 *base3,
 				      int seg, struct kvm_vcpu *vcpu);
-	void (*set_cached_descriptor)(struct desc_struct *desc,
+	void (*set_cached_descriptor)(struct desc_struct *desc, u32 base3,
 				      int seg, struct kvm_vcpu *vcpu);
 	u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
 	void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
@@ -239,6 +239,7 @@ struct x86_emulate_ctxt {
 	int interruptibility;
 
 	bool perm_ok; /* do not check permissions if true */
+	bool only_vendor_specific_insn;
 
 	bool have_exception;
 	struct x86_exception exception;
@@ -85,7 +85,7 @@
 
 #define ASYNC_PF_PER_VCPU 64
 
-extern spinlock_t kvm_lock;
+extern raw_spinlock_t kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_vcpu;
@@ -255,6 +255,8 @@ struct kvm_mmu {
 	int (*sync_page)(struct kvm_vcpu *vcpu,
 			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
+	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+			   u64 *spte, const void *pte, unsigned long mmu_seq);
 	hpa_t root_hpa;
 	int root_level;
 	int shadow_root_level;
@@ -335,12 +337,6 @@ struct kvm_vcpu_arch {
 	u64 *last_pte_updated;
 	gfn_t last_pte_gfn;
 
-	struct {
-		gfn_t gfn;	/* presumed gfn during guest pte update */
-		pfn_t pfn;	/* pfn corresponding to that gfn */
-		unsigned long mmu_seq;
-	} update_pte;
-
 	struct fpu guest_fpu;
 	u64 xcr0;
 
@@ -448,7 +444,7 @@ struct kvm_arch {
 
 	unsigned long irq_sources_bitmap;
 	s64 kvmclock_offset;
-	spinlock_t tsc_write_lock;
+	raw_spinlock_t tsc_write_lock;
 	u64 last_tsc_nsec;
 	u64 last_tsc_offset;
 	u64 last_tsc_write;
@@ -25,7 +25,6 @@ extern int pic_mode;
 #define MAX_IRQ_SOURCES		256
 
 extern unsigned int def_to_bigsmp;
-extern u8 apicid_2_node[];
 
 #ifdef CONFIG_X86_NUMAQ
 extern int mp_bus_id_to_node[MAX_MP_BUSSES];
@@ -33,8 +32,6 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
 extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
 #endif
 
-#define MAX_APICID		256
-
 #else /* CONFIG_X86_64: */
 
 #define MAX_MP_BUSSES		256
@@ -43,6 +43,7 @@
 
 #define MSR_MTRRcap			0x000000fe
 #define MSR_IA32_BBL_CR_CTL		0x00000119
+#define MSR_IA32_BBL_CR_CTL3		0x0000011e
 
 #define MSR_IA32_SYSENTER_CS		0x00000174
 #define MSR_IA32_SYSENTER_ESP		0x00000175
@@ -52,6 +53,9 @@
 #define MSR_IA32_MCG_STATUS		0x0000017a
 #define MSR_IA32_MCG_CTL		0x0000017b
 
+#define MSR_OFFCORE_RSP_0		0x000001a6
+#define MSR_OFFCORE_RSP_1		0x000001a7
+
 #define MSR_IA32_PEBS_ENABLE		0x000003f1
 #define MSR_IA32_DS_AREA		0x00000600
 #define MSR_IA32_PERF_CAPABILITIES	0x00000345
@@ -7,7 +7,6 @@
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
-extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
 extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
 extern int reserve_perfctr_nmi(unsigned int);
 extern void release_perfctr_nmi(unsigned int);
@@ -30,8 +29,8 @@ void arch_trigger_all_cpu_backtrace(void);
  * external nmis, because the local ones are more frequent.
  *
  * Also setup some default high/normal/low settings for
- * subsystems to registers with.  Using 4 bits to seperate
- * the priorities.  This can go alot higher if needed be.
+ * subsystems to registers with.  Using 4 bits to separate
+ * the priorities.  This can go a lot higher if needed be.
  */
 
 #define NMI_LOCAL_SHIFT		16	/* randomly picked */
@@ -38,7 +38,7 @@
 #define K8_NOP8	K8_NOP4 K8_NOP4
 
 /* K7 nops
-   uses eax dependencies (arbitary choice)
+   uses eax dependencies (arbitrary choice)
    1: nop
    2: movl %eax,%eax
    3: leal (,%eax,1),%eax
@@ -1,5 +1,57 @@
 #ifndef _ASM_X86_NUMA_H
 #define _ASM_X86_NUMA_H
 
+#include <asm/topology.h>
+#include <asm/apicdef.h>
+
+#ifdef CONFIG_NUMA
+
+#define NR_NODE_MEMBLKS		(MAX_NUMNODES*2)
+
+/*
+ * __apicid_to_node[] stores the raw mapping between physical apicid and
+ * node and is used to initialize cpu_to_node mapping.
+ *
+ * The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus
+ * should be accessed by the accessors - set_apicid_to_node() and
+ * numa_cpu_node().
+ */
+extern s16 __apicid_to_node[MAX_LOCAL_APIC];
+
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+	__apicid_to_node[apicid] = node;
+}
+#else	/* CONFIG_NUMA */
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+}
+#endif	/* CONFIG_NUMA */
+
 #ifdef CONFIG_X86_32
 # include "numa_32.h"
 #else
 # include "numa_64.h"
 #endif
 
+#ifdef CONFIG_NUMA
+extern void __cpuinit numa_set_node(int cpu, int node);
+extern void __cpuinit numa_clear_node(int cpu);
+extern void __init numa_init_array(void);
+extern void __init init_cpu_to_node(void);
+extern void __cpuinit numa_add_cpu(int cpu);
+extern void __cpuinit numa_remove_cpu(int cpu);
+#else	/* CONFIG_NUMA */
+static inline void numa_set_node(int cpu, int node)	{ }
+static inline void numa_clear_node(int cpu)		{ }
+static inline void numa_init_array(void)		{ }
+static inline void init_cpu_to_node(void)		{ }
+static inline void numa_add_cpu(int cpu)		{ }
+static inline void numa_remove_cpu(int cpu)		{ }
+#endif	/* CONFIG_NUMA */
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable);
+#endif
+
 #endif	/* _ASM_X86_NUMA_H */
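Note: a sketch of how the accessors above fit together during early boot
(hypothetical call sites; the real users are the SRAT/NUMA init paths):

	set_apicid_to_node(apicid, node);	/* while parsing firmware tables */

	/* later, when a cpu is brought up: */
	node = numa_cpu_node(cpu);		/* may return NUMA_NO_NODE */
	if (node != NUMA_NO_NODE)
		numa_set_node(cpu, node);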
@@ -4,7 +4,12 @@
 extern int numa_off;
 
 extern int pxm_to_nid(int pxm);
-extern void numa_remove_cpu(int cpu);
+
+#ifdef CONFIG_NUMA
+extern int __cpuinit numa_cpu_node(int cpu);
+#else	/* CONFIG_NUMA */
+static inline int numa_cpu_node(int cpu)	{ return NUMA_NO_NODE; }
+#endif	/* CONFIG_NUMA */
 
 #ifdef CONFIG_HIGHMEM
 extern void set_highmem_pages_init(void);
@@ -2,23 +2,16 @@
 #define _ASM_X86_NUMA_64_H
 
 #include <linux/nodemask.h>
-#include <asm/apicdef.h>
 
 struct bootnode {
 	u64 start;
 	u64 end;
 };
 
-extern int compute_hash_shift(struct bootnode *nodes, int numblks,
-			      int *nodeids);
-
 #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
 
-extern void numa_init_array(void);
 extern int numa_off;
 
-extern s16 apicid_to_node[MAX_LOCAL_APIC];
-
 extern unsigned long numa_free_all_bootmem(void);
 extern void setup_node_bootmem(int nodeid, unsigned long start,
 			       unsigned long end);
@@ -31,11 +24,11 @@ extern void setup_node_bootmem(int nodeid, unsigned long start,
  */
 #define NODE_MIN_SIZE (4*1024*1024)
 
-extern void __init init_cpu_to_node(void);
-extern void __cpuinit numa_set_node(int cpu, int node);
-extern void __cpuinit numa_clear_node(int cpu);
-extern void __cpuinit numa_add_cpu(int cpu);
-extern void __cpuinit numa_remove_cpu(int cpu);
+extern nodemask_t numa_nodes_parsed __initdata;
+
+extern int __cpuinit numa_cpu_node(int cpu);
+extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+extern void __init numa_set_distance(int from, int to, int distance);
 
 #ifdef CONFIG_NUMA_EMU
 #define FAKE_NODE_MIN_SIZE	((u64)32 << 20)
@@ -43,11 +36,7 @@ extern void __cpuinit numa_remove_cpu(int cpu);
 void numa_emu_cmdline(char *);
 #endif /* CONFIG_NUMA_EMU */
 #else
-static inline void init_cpu_to_node(void)		{ }
-static inline void numa_set_node(int cpu, int node)	{ }
-static inline void numa_clear_node(int cpu)		{ }
-static inline void numa_add_cpu(int cpu, int node)	{ }
-static inline void numa_remove_cpu(int cpu)		{ }
+static inline int numa_cpu_node(int cpu)		{ return NUMA_NO_NODE; }
 #endif
 
 #endif /* _ASM_X86_NUMA_64_H */
@@ -20,7 +20,7 @@ extern struct olpc_platform_t olpc_platform_info;
 
 /*
  * OLPC board IDs contain the major build number within the mask 0x0ff0,
- * and the minor build number withing 0x000f.  Pre-builds have a minor
+ * and the minor build number within 0x000f.  Pre-builds have a minor
  * number less than 8, and normal builds start at 8.  For example, 0x0B10
  * is a PreB1, and 0x0C18 is a C1.
  */
@@ -6,7 +6,7 @@
 
 #define OLPC_OFW_SIG 0x2057464F	/* aka "OFW " */
 
-#ifdef CONFIG_OLPC_OPENFIRMWARE
+#ifdef CONFIG_OLPC
 
 extern bool olpc_ofw_is_installed(void);
 
@@ -26,19 +26,15 @@ extern void setup_olpc_ofw_pgd(void);
 /* check if OFW was detected during boot */
 extern bool olpc_ofw_present(void);
 
-#else /* !CONFIG_OLPC_OPENFIRMWARE */
-
-static inline bool olpc_ofw_is_installed(void) { return false; }
+#else /* !CONFIG_OLPC */
 static inline void olpc_ofw_detect(void) { }
 static inline void setup_olpc_ofw_pgd(void) { }
 static inline bool olpc_ofw_present(void) { return false; }
+#endif /* !CONFIG_OLPC */
 
-#endif /* !CONFIG_OLPC_OPENFIRMWARE */
-
-#ifdef CONFIG_OLPC_OPENFIRMWARE_DT
+#ifdef CONFIG_OF_PROMTREE
 extern void olpc_dt_build_devicetree(void);
 #else
 static inline void olpc_dt_build_devicetree(void) { }
-#endif /* CONFIG_OLPC_OPENFIRMWARE_DT */
+#endif
 
 #endif /* _ASM_X86_OLPC_OFW_H */
@@ -2,6 +2,7 @@
 #define _ASM_X86_PAGE_DEFS_H
 
 #include <linux/const.h>
+#include <linux/types.h>
 
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT	12
@@ -45,11 +46,15 @@ extern int devmem_is_allowed(unsigned long pagenr);
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
 
+static inline phys_addr_t get_max_mapped(void)
+{
+	return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
+}
+
 extern unsigned long init_memory_mapping(unsigned long start,
 					 unsigned long end);
 
-extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn,
-			 int acpi, int k8);
+extern void initmem_init(void);
 extern void free_initmem(void);
 
 #endif	/* !__ASSEMBLY__ */
@@ -451,6 +451,26 @@ do { \
 #define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #endif /* !CONFIG_M386 */
 
+#ifdef CONFIG_X86_CMPXCHG64
+#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)			\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	typeof(o2) __dummy = n2;					\
+	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
+		    : "=a"(__ret), "=m" (pcp1), "=d"(__dummy)		\
+		    :  "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2));	\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#endif /* CONFIG_X86_CMPXCHG64 */
+
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
@@ -480,6 +500,34 @@ do { \
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+
+/*
+ * Pretty complex macro to generate cmpxchg16 instruction.  The instruction
+ * is not supported on early AMD64 processors so we must be able to emulate
+ * it in software.  The address used in the cmpxchg16 instruction must be
+ * aligned to a 16 byte boundary.
+ */
+#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)			\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	typeof(o2) __dummy;						\
+	alternative_io("call this_cpu_cmpxchg16b_emu\n\t" P6_NOP4,	\
+		       "cmpxchg16b %%gs:(%%rsi)\n\tsetz %0\n\t",	\
+		       X86_FEATURE_CX16,				\
+		       ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)),		\
+		       "S" (&pcp1), "b"(__n1), "c"(__n2),		\
+		       "a"(__o1), "d"(__o2));				\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
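Note: the cmpxchg-double macros added above compare and replace two adjacent
per-cpu words as one atomic unit (the pair must be naturally aligned to twice
the word size, i.e. 16 bytes for the _8 variants). A usage sketch with
hypothetical fields:

	/* ex.list and ex.tid are assumed adjacent per-cpu members */
	if (this_cpu_cmpxchg_double_8(ex.list, ex.tid,
				      old_list, old_tid, new_list, new_tid)) {
		/* success: both words were replaced together */
	}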
@@ -1,5 +1,5 @@
 /*
- * Netburst Perfomance Events (P4, old Xeon)
+ * Netburst Performance Events (P4, old Xeon)
 */
 
 #ifndef PERF_EVENT_P4_H
@@ -9,7 +9,7 @@
 #include <linux/bitops.h>
 
 /*
- * NetBurst has perfomance MSRs shared between
+ * NetBurst has performance MSRs shared between
 * threads if HT is turned on, ie for both logical
 * processors (mem: in turn in Atom with HT support
 * perf-MSRs are not shared and every thread has its
@@ -69,8 +69,6 @@ static inline void native_pmd_clear(pmd_t *pmd)
 
 static inline void pud_clear(pud_t *pudp)
 {
-	unsigned long pgd;
-
 	set_pud(pudp, __pud(0));
 
 	/*
@@ -79,13 +77,10 @@ static inline void pud_clear(pud_t *pudp)
 	 * section 8.1: in PAE mode we explicitly have to flush the
 	 * TLB via cr3 if the top-level pgd is changed...
 	 *
-	 * Make sure the pud entry we're updating is within the
-	 * current pgd to avoid unnecessary TLB flushes.
+	 * Currently all places where pud_clear() is called either have
+	 * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or
+	 * pud_clear_bad()), so we don't need TLB flush here.
 	 */
-	pgd = read_cr3();
-	if (__pa(pudp) >= pgd && __pa(pudp) <
-	    (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
-		write_cr3(pgd);
 }
 
 #ifdef CONFIG_SMP
@@ -7,7 +7,7 @@
 */
 #define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
 #define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF	0x00000010 /* Auxillary carry Flag */
+#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
 #define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
 #define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
 #define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
@@ -94,10 +94,6 @@ struct cpuinfo_x86 {
 	int			x86_cache_alignment;	/* In bytes */
 	int			x86_power;
 	unsigned long		loops_per_jiffy;
-#ifdef CONFIG_SMP
-	/* cpus sharing the last level cache: */
-	cpumask_var_t		llc_shared_map;
-#endif
 	/* cpuid returned max cores value: */
 	u16			 x86_max_cores;
 	u16			apicid;
@@ -1 +1,69 @@
-/* dummy prom.h; here to make linux/of.h's #includes happy */
+/*
+ * Definitions for Device tree / OpenFirmware handling on X86
+ *
+ * based on arch/powerpc/include/asm/prom.h which is
+ *         Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_X86_PROM_H
+#define _ASM_X86_PROM_H
+#ifndef __ASSEMBLY__
+
+#include <linux/of.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include <asm/irq.h>
+#include <asm/atomic.h>
+#include <asm/setup.h>
+#include <asm/irq_controller.h>
+
+#ifdef CONFIG_OF
+extern int of_ioapic;
+extern u64 initial_dtb;
+extern void add_dtb(u64 data);
+extern void x86_add_irq_domains(void);
+void __cpuinit x86_of_pci_init(void);
+void x86_dtb_init(void);
+
+static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
+{
+	return pdev ? pdev->dev.of_node : NULL;
+}
+
+static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
+{
+	return pci_device_to_OF_node(bus->self);
+}
+
+#else
+static inline void add_dtb(u64 data) { }
+static inline void x86_add_irq_domains(void) { }
+static inline void x86_of_pci_init(void) { }
+static inline void x86_dtb_init(void) { }
+#define of_ioapic 0
+#endif
+
+extern char cmd_line[COMMAND_LINE_SIZE];
+
+#define pci_address_to_pio pci_address_to_pio
+unsigned long pci_address_to_pio(phys_addr_t addr);
+
+/**
+ * irq_dispose_mapping - Unmap an interrupt
+ * @virq: linux virq number of the interrupt to unmap
+ *
+ * FIXME: We really should implement proper virq handling like power,
+ * but that's going to be major surgery.
+ */
+static inline void irq_dispose_mapping(unsigned int virq) { }
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+#endif /* __ASSEMBLY__ */
+#endif
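Note: a small sketch of the new OF glue above in use (hypothetical caller):

	struct device_node *dn = pci_device_to_OF_node(pdev);

	if (dn)		/* only set when the platform booted from a DTB */
		pr_info("%s has a device-tree node\n", pci_name(pdev));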
@@ -31,7 +31,7 @@
 #define R12		24
 #define RBP		32
 #define RBX		40
-/* arguments: interrupts/non tracing syscalls only save upto here*/
+/* arguments: interrupts/non tracing syscalls only save up to here*/
 #define R11		48
 #define R10		56
 #define R9		64
@@ -73,7 +73,7 @@ struct pt_regs {
 	unsigned long r12;
 	unsigned long rbp;
 	unsigned long rbx;
-/* arguments: non interrupts/non tracing syscalls only save upto here*/
+/* arguments: non interrupts/non tracing syscalls only save up to here*/
 	unsigned long r11;
 	unsigned long r10;
 	unsigned long r9;
@@ -103,7 +103,7 @@ struct pt_regs {
 	unsigned long r12;
 	unsigned long bp;
 	unsigned long bx;
-/* arguments: non interrupts/non tracing syscalls only save upto here*/
+/* arguments: non interrupts/non tracing syscalls only save up to here*/
 	unsigned long r11;
 	unsigned long r10;
 	unsigned long r9;
@@ -18,7 +18,10 @@ extern struct machine_ops machine_ops;
 
 void native_machine_crash_shutdown(struct pt_regs *regs);
 void native_machine_shutdown(void);
-void machine_real_restart(const unsigned char *code, int length);
+void machine_real_restart(unsigned int type);
+/* These must match dispatch_table in reboot_32.S */
+#define MRR_BIOS	0
+#define MRR_APM		1
 
 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
@@ -37,26 +37,9 @@
 #endif
 
 #ifdef __KERNEL__
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/lockdep.h>
 #include <asm/asm.h>
 
-struct rwsem_waiter;
-
-extern asmregparm struct rw_semaphore *
- rwsem_down_read_failed(struct rw_semaphore *sem);
-extern asmregparm struct rw_semaphore *
- rwsem_down_write_failed(struct rw_semaphore *sem);
-extern asmregparm struct rw_semaphore *
- rwsem_wake(struct rw_semaphore *);
-extern asmregparm struct rw_semaphore *
- rwsem_downgrade_wake(struct rw_semaphore *sem);
-
 /*
  * the semaphore definition
  *
  * The bias values and the counter type limits the number of
  * potential readers/writers to 32767 for 32 bits and 2147483647
  * for 64 bits.
@@ -74,43 +57,6 @@ extern asmregparm struct rw_semaphore *
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-typedef signed long rwsem_count_t;
-
-struct rw_semaphore {
-	rwsem_count_t		count;
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-
-#define __RWSEM_INITIALIZER(name)				\
-{								\
-	RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-	LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
-}
-
-#define DECLARE_RWSEM(name)					\
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-			 struct lock_class_key *key);
-
-#define init_rwsem(sem)						\
-do {								\
-	static struct lock_class_key __key;			\
-								\
-	__init_rwsem((sem), #sem, &__key);			\
-} while (0)
-
 /*
  * lock for reading
  */
@@ -133,7 +79,7 @@ static inline void __down_read(struct rw_semaphore *sem)
 */
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-	rwsem_count_t result, tmp;
+	long result, tmp;
 	asm volatile("# beginning __down_read_trylock\n\t"
 		     "  mov          %0,%1\n\t"
 		     "1:\n\t"
@@ -155,7 +101,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-	rwsem_count_t tmp;
+	long tmp;
 	asm volatile("# beginning down_write\n\t"
 		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
 		     /* adds 0xffff0001, returns the old value */
@@ -180,9 +126,8 @@ static inline void __down_write(struct rw_semaphore *sem)
 */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	rwsem_count_t ret = cmpxchg(&sem->count,
-				    RWSEM_UNLOCKED_VALUE,
-				    RWSEM_ACTIVE_WRITE_BIAS);
+	long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+			   RWSEM_ACTIVE_WRITE_BIAS);
 	if (ret == RWSEM_UNLOCKED_VALUE)
 		return 1;
 	return 0;
@@ -193,7 +138,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-	rwsem_count_t tmp;
+	long tmp;
 	asm volatile("# beginning __up_read\n\t"
 		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
 		     /* subtracts 1, returns the old value */
@@ -211,7 +156,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	rwsem_count_t tmp;
+	long tmp;
 	asm volatile("# beginning __up_write\n\t"
 		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
 		     /* subtracts 0xffff0001, returns the old value */
@@ -247,8 +192,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 /*
  * implement atomic add functionality
 */
-static inline void rwsem_atomic_add(rwsem_count_t delta,
-				    struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 {
 	asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
 		     : "+m" (sem->count)
@@ -258,10 +202,9 @@ static inline void rwsem_atomic_add(rwsem_count_t delta,
 /*
  * implement exchange and add functionality
 */
-static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
-						struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-	rwsem_count_t tmp = delta;
+	long tmp = delta;
 
 	asm volatile(LOCK_PREFIX "xadd %0,%1"
 		     : "+r" (tmp), "+m" (sem->count)
@@ -270,10 +213,5 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
 	return tmp + delta;
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_RWSEM_H */
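Note on the count encoding manipulated by the asm above (unchanged here; this
patch only replaces the rwsem_count_t typedef with plain long and drops the
boilerplate now provided by linux/rwsem.h). On 32-bit:

	/*
	 * 0x00000000	unlocked
	 * 0x0000000N	N readers active
	 * 0xffff0001	one writer active
	 *		(RWSEM_ACTIVE_WRITE_BIAS = WAITING_BIAS + ACTIVE_BIAS)
	 * __down_write xadds 0xffff0001 and __up_write subtracts it,
	 * exactly the constants quoted in the asm comments.
	 */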
@@ -1,14 +1,16 @@
 #ifndef _ASM_X86_SEGMENT_H
 #define _ASM_X86_SEGMENT_H
 
+#include <linux/const.h>
+
 /* Constructor for a conventional segment GDT (or LDT) entry */
 /* This is a macro so it can be used in initializers */
 #define GDT_ENTRY(flags, base, limit)			\
-	((((base)  & 0xff000000ULL) << (56-24)) |	\
-	 (((flags) & 0x0000f0ffULL) << 40) |		\
-	 (((limit) & 0x000f0000ULL) << (48-16)) |	\
-	 (((base)  & 0x00ffffffULL) << 16) |		\
-	 (((limit) & 0x0000ffffULL)))
+	((((base)  & _AC(0xff000000,ULL)) << (56-24)) |	\
+	 (((flags) & _AC(0x0000f0ff,ULL)) << 40) |	\
+	 (((limit) & _AC(0x000f0000,ULL)) << (48-16)) |	\
+	 (((base)  & _AC(0x00ffffff,ULL)) << 16) |	\
+	 (((limit) & _AC(0x0000ffff,ULL))))
 
 /* Simple and small GDT entries for booting only */
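Worked example of GDT_ENTRY() above (_AC() only strips the ULL suffix when the
header is included from assembly). For the classic flat 32-bit code segment,
flags 0xc09a, base 0, limit 0xfffff:

	/*
	 * (0xf0000 << (48-16))	limit bits 19:16	= 0x000f000000000000
	 * (0xc09a  << 40)	flags/type		= 0x00c09a0000000000
	 * (0x0ffff)		limit bits 15:0		= 0x000000000000ffff
	 *						------------------
	 *						  0x00cf9a000000ffff
	 */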
@@ -17,12 +17,24 @@
 #endif
 #include <asm/thread_info.h>
 #include <asm/cpumask.h>
+#include <asm/cpufeature.h>
 
 extern int smp_num_siblings;
 extern unsigned int num_processors;
 
+static inline bool cpu_has_ht_siblings(void)
+{
+	bool has_siblings = false;
+#ifdef CONFIG_SMP
+	has_siblings = cpu_has_ht && smp_num_siblings > 1;
+#endif
+	return has_siblings;
+}
+
 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+/* cpus sharing the last level cache: */
+DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
 DECLARE_PER_CPU(int, cpu_number);
@@ -36,8 +48,16 @@ static inline struct cpumask *cpu_core_mask(int cpu)
 	return per_cpu(cpu_core_map, cpu);
 }
 
+static inline struct cpumask *cpu_llc_shared_mask(int cpu)
+{
+	return per_cpu(cpu_llc_shared_map, cpu);
+}
+
 DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
+DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
+#endif
 
 /* Static state in head.S used to set up a CPU */
 extern unsigned long stack_start; /* Initial stack pointer address */
@@ -47,7 +47,7 @@ struct stacktrace_ops {
 };
 
 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
-		unsigned long *stack,
+		unsigned long *stack, unsigned long bp,
 		const struct stacktrace_ops *ops, void *data);
 
 #ifdef CONFIG_X86_32
@@ -86,11 +86,11 @@ stack_frame(struct task_struct *task, struct pt_regs *regs)
 
 extern void
 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-		   unsigned long *stack, char *log_lvl);
+		   unsigned long *stack, unsigned long bp, char *log_lvl);
 
 extern void
 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-		   unsigned long *sp, char *log_lvl);
+		   unsigned long *sp, unsigned long bp, char *log_lvl);
 
 extern unsigned int code_bytes;
@@ -98,8 +98,6 @@ do { \
 */
 #define HAVE_DISABLE_HLT
 #else
-#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
-#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
 
 /* frame pointer must be last for get_wchan */
 #define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
@@ -161,8 +161,14 @@ struct thread_info {
 
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
 
-#define alloc_thread_info(tsk)						\
-	((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
+#define alloc_thread_info_node(tsk, node)				\
+({									\
+	struct page *page = alloc_pages_node(node, THREAD_FLAGS,	\
+					     THREAD_ORDER);		\
+	struct thread_info *ret = page ? page_address(page) : NULL;	\
+									\
+	ret;								\
+})
 
 #ifdef CONFIG_X86_32
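Note: the replacement macro above is the NUMA-aware allocation path; a sketch
of the caller side (hypothetical; the real caller is the generic fork code):

	/* put the new task's thread_info on the node it will likely run on */
	struct thread_info *ti = alloc_thread_info_node(tsk, node);

	if (!ti)	/* alloc_pages_node() failed; the macro yields NULL */
		return NULL;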
@@ -47,21 +47,6 @@
 
 #include <asm/mpspec.h>
 
-#ifdef CONFIG_X86_32
-
-/* Mappings between logical cpu number and node number */
-extern int cpu_to_node_map[];
-
-/* Returns the number of the node containing CPU 'cpu' */
-static inline int __cpu_to_node(int cpu)
-{
-	return cpu_to_node_map[cpu];
-}
-#define early_cpu_to_node __cpu_to_node
-#define cpu_to_node __cpu_to_node
-
-#else /* CONFIG_X86_64 */
-
 /* Mappings between logical cpu number and node number */
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
@@ -84,8 +69,6 @@ static inline int early_cpu_to_node(int cpu)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
-#endif /* CONFIG_X86_64 */
-
 /* Mappings between node number and cpus on that node. */
 extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
 
@@ -155,7 +138,7 @@ extern unsigned long node_remap_size[];
 	.balance_interval	= 1,					\
 }
 
-#ifdef CONFIG_X86_64_ACPI_NUMA
+#ifdef CONFIG_X86_64
 extern int __node_distance(int, int);
 #define node_distance(a, b) __node_distance(a, b)
 #endif
arch/x86/include/asm/trampoline.h

@@ -3,25 +3,36 @@

 #ifndef __ASSEMBLY__

-#ifdef CONFIG_X86_TRAMPOLINE
+#include <linux/types.h>
+#include <asm/io.h>
+
 /*
- * Trampoline 80x86 program as an array.
+ * Trampoline 80x86 program as an array.  These are in the init rodata
+ * segment, but that's okay, because we only care about the relative
+ * addresses of the symbols.
  */
-extern const unsigned char trampoline_data [];
-extern const unsigned char trampoline_end [];
-extern unsigned char *trampoline_base;
+extern const unsigned char x86_trampoline_start [];
+extern const unsigned char x86_trampoline_end [];
+extern unsigned char *x86_trampoline_base;

 extern unsigned long init_rsp;
 extern unsigned long initial_code;
 extern unsigned long initial_gs;

-#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
+extern void __init setup_trampolines(void);

-extern unsigned long setup_trampoline(void);
-extern void __init reserve_trampoline_memory(void);
-#else
-static inline void reserve_trampoline_memory(void) {}
-#endif /* CONFIG_X86_TRAMPOLINE */
+extern const unsigned char trampoline_data[];
+extern const unsigned char trampoline_status[];
+
+#define TRAMPOLINE_SYM(x)						\
+	((void *)(x86_trampoline_base +					\
+		  ((const unsigned char *)(x) - x86_trampoline_start)))
+
+/* Address of the SMP trampoline */
+static inline unsigned long trampoline_address(void)
+{
+	return virt_to_phys(TRAMPOLINE_SYM(trampoline_data));
+}

 #endif /* __ASSEMBLY__ */
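TRAMPOLINE_SYM is plain pointer arithmetic: a blob linked at one address is copied to x86_trampoline_base, and a symbol's relocated address is the copy base plus the symbol's offset from the blob start. A minimal user-space sketch of the same offset-preserving relocation, with arrays standing in for the linker symbols:

#include <stdio.h>
#include <string.h>

/* Stand-ins for the linker symbols: the blob as linked, and its copy. */
static const unsigned char blob_start[64] = "trampoline-bytes";
static unsigned char copy_base[64];

/* Same shape as TRAMPOLINE_SYM: base of the copy plus the symbol's
 * offset inside the original blob. */
#define RELOC_SYM(x) \
	((void *)(copy_base + ((const unsigned char *)(x) - blob_start)))

int main(void)
{
	const unsigned char *sym = blob_start + 11; /* a "symbol" in the blob */

	memcpy(copy_base, blob_start, sizeof(blob_start));
	printf("offset 11 maps to %p (copy base %p)\n",
	       RELOC_SYM(sym), (void *)copy_base);
	return 0;
}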
arch/x86/include/asm/tsc.h

@@ -35,7 +35,7 @@ static inline cycles_t get_cycles(void)
 static __always_inline cycles_t vget_cycles(void)
 {
 	/*
-	 * We only do VDSOs on TSC capable CPUs, so this shouldnt
+	 * We only do VDSOs on TSC capable CPUs, so this shouldn't
 	 * access boot_cpu_data (which is not VDSO-safe):
 	 */
 #ifndef CONFIG_X86_TSC
arch/x86/include/asm/types.h

@@ -1,20 +1,12 @@
 #ifndef _ASM_X86_TYPES_H
 #define _ASM_X86_TYPES_H

+#define dma_addr_t dma_addr_t
+
 #include <asm-generic/types.h>

 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__

 typedef u64 dma64_addr_t;
-#if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G)
-/* DMA addresses come in 32-bit and 64-bit flavours. */
-typedef u64 dma_addr_t;
-#else
-typedef u32 dma_addr_t;
-#endif

 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
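The self-referential `#define dma_addr_t dma_addr_t` is the usual preprocessor marker idiom: the name still expands to itself, so code using the type is unaffected, but other headers can now test whether the architecture already provided a definition before supplying a generic fallback. A hedged stand-alone illustration of the pattern, with names invented for the example:

/* Marker idiom: a macro that expands to itself leaves every use of the
 * name untouched but makes the name visible to #ifdef/#ifndef tests. */
#define my_handle_t my_handle_t
typedef unsigned long my_handle_t;

/* A generic header can now skip its fallback definition: */
#ifndef my_handle_t
typedef unsigned int my_handle_t;	/* not reached here */
#endif

my_handle_t example_value = 42UL;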
arch/x86/include/asm/unistd_32.h

@@ -346,10 +346,14 @@
 #define __NR_fanotify_init	338
 #define __NR_fanotify_mark	339
 #define __NR_prlimit64		340
+#define __NR_name_to_handle_at	341
+#define __NR_open_by_handle_at	342
+#define __NR_clock_adjtime	343
+#define __NR_syncfs		344

 #ifdef __KERNEL__

-#define NR_syscalls 341
+#define NR_syscalls 345

 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
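NR_syscalls sizes the syscall table, so it must stay at the highest syscall number plus one: adding __NR_syncfs == 344 bumps it to 345, and the 64-bit table below follows the same rule with numbers 303-306. A small compile-time check one could keep next to such definitions (a sketch, not part of the patch):

/* Syscall numbers are table indices, so the table size is the highest
 * number plus one: __NR_syncfs == 344 gives NR_syscalls == 345. */
#define __NR_syncfs	344
#define NR_syscalls	345

_Static_assert(NR_syscalls == __NR_syncfs + 1,
	       "NR_syscalls must be the highest syscall number + 1");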
arch/x86/include/asm/unistd_64.h

@@ -669,6 +669,14 @@ __SYSCALL(__NR_fanotify_init, sys_fanotify_init)
 __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
 #define __NR_prlimit64				302
 __SYSCALL(__NR_prlimit64, sys_prlimit64)
+#define __NR_name_to_handle_at			303
+__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
+#define __NR_open_by_handle_at			304
+__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+#define __NR_clock_adjtime			305
+__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
+#define __NR_syncfs				306
+__SYSCALL(__NR_syncfs, sys_syncfs)

 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
arch/x86/include/asm/x86_init.h

@@ -83,11 +83,13 @@ struct x86_init_paging {
  *				boot cpu
  * @tsc_pre_init:		platform function called before TSC init
  * @timer_init:			initialize the platform timer (default PIT/HPET)
+ * @wallclock_init:		init the wallclock device
  */
 struct x86_init_timers {
	void (*setup_percpu_clockev)(void);
	void (*tsc_pre_init)(void);
	void (*timer_init)(void);
+	void (*wallclock_init)(void);
 };

 /**
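The new callback is consumed like the other members of the x86_init-style ops table: defaults are installed at build time and a platform overrides them by assigning its own function during early setup. A minimal model of that pattern (the struct, defaults, and platform function are invented for illustration):

#include <stdio.h>

/* Model of an ops table: function-pointer defaults a platform may
 * overwrite before the corresponding init step runs. */
struct init_timers {
	void (*timer_init)(void);
	void (*wallclock_init)(void);
};

static void default_noop(void) { }
static void my_plat_wallclock_init(void) { puts("platform wallclock init"); }

static struct init_timers timers = {
	.timer_init	= default_noop,
	.wallclock_init	= default_noop,
};

int main(void)
{
	timers.wallclock_init = my_plat_wallclock_init; /* platform hook */
	timers.timer_init();
	timers.wallclock_init();
	return 0;
}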
arch/x86/include/asm/xen/hypercall.h

@@ -287,7 +287,7 @@ HYPERVISOR_fpu_taskswitch(int set)
 static inline int
 HYPERVISOR_sched_op(int cmd, void *arg)
 {
-	return _hypercall2(int, sched_op_new, cmd, arg);
+	return _hypercall2(int, sched_op, cmd, arg);
 }

 static inline long
@@ -422,10 +422,17 @@ HYPERVISOR_set_segment_base(int reg, unsigned long value)
 #endif

 static inline int
-HYPERVISOR_suspend(unsigned long srec)
+HYPERVISOR_suspend(unsigned long start_info_mfn)
 {
-	return _hypercall3(int, sched_op, SCHEDOP_shutdown,
-			   SHUTDOWN_suspend, srec);
+	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
+
+	/*
+	 * For a PV guest the tools require that the start_info mfn be
+	 * present in rdx/edx when the hypercall is made. Per the
+	 * hypercall calling convention this is the third hypercall
+	 * argument, which is start_info_mfn here.
+	 */
+	return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
 }

 static inline int
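With the modern sched_op ABI the shutdown reason travels in a struct sched_shutdown, while the start_info machine frame rides separately as the third hypercall argument so it lands in rdx/edx. A hedged sketch of what a call site then looks like (virt_to_mfn and xen_start_info are the kernel's existing helpers for a PV guest's start-info page; the surrounding suspend logic is omitted):

/* Suspend sketch: hand the toolstack the machine frame number of the
 * start_info page so it can be relocated on resume. */
int cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));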
arch/x86/include/asm/xen/interface.h

@@ -86,7 +86,7 @@ DEFINE_GUEST_HANDLE(void);
  * The privilege level specifies which modes may enter a trap via a software
  * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
  * privilege levels as follows:
- *  Level == 0: Noone may enter
+ *  Level == 0: No one may enter
  *  Level == 1: Kernel may enter
  *  Level == 2: Kernel may enter
  *  Level == 3: Everyone may enter
arch/x86/include/asm/xen/page.h

@@ -29,8 +29,10 @@ typedef struct xpaddr {

 /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
 #define INVALID_P2M_ENTRY	(~0UL)
-#define FOREIGN_FRAME_BIT	(1UL<<31)
+#define FOREIGN_FRAME_BIT	(1UL<<(BITS_PER_LONG-1))
+#define IDENTITY_FRAME_BIT	(1UL<<(BITS_PER_LONG-2))
 #define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
+#define IDENTITY_FRAME(m)	((m) | IDENTITY_FRAME_BIT)

 /* Maximum amount of memory we can handle in a domain in pages */
 #define MAX_DOMAIN_PAGES \
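The two flags occupy the top bits of an unsigned long, so real frame numbers can never collide with them: on a 64-bit build FOREIGN_FRAME_BIT is 1UL<<63 and IDENTITY_FRAME_BIT is 1UL<<62, while a 32-bit build keeps the old 1UL<<31 plus 1UL<<30. A small demonstration of tagging and untagging an entry (the frame number is illustrative):

#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG		(sizeof(unsigned long) * CHAR_BIT)
#define FOREIGN_FRAME_BIT	(1UL << (BITS_PER_LONG - 1))
#define IDENTITY_FRAME_BIT	(1UL << (BITS_PER_LONG - 2))

int main(void)
{
	unsigned long mfn = 0x1234;			/* illustrative mfn */
	unsigned long entry = mfn | FOREIGN_FRAME_BIT;	/* tag as foreign */

	/* Clearing both flag bits recovers the raw frame number. */
	assert((entry & ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT)) == mfn);
	return 0;
}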
@@ -41,12 +43,18 @@ extern unsigned int machine_to_phys_order;

 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern unsigned long set_phys_range_identity(unsigned long pfn_s,
+					     unsigned long pfn_e);

 extern int m2p_add_override(unsigned long mfn, struct page *page);
 extern int m2p_remove_override(struct page *page);
 extern struct page *m2p_find_override(unsigned long mfn);
 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);

+#ifdef CONFIG_XEN_DEBUG_FS
+extern int p2m_dump_show(struct seq_file *m, void *v);
+#endif
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
 	unsigned long mfn;

@@ -57,7 +65,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 	mfn = get_phys_to_machine(pfn);

 	if (mfn != INVALID_P2M_ENTRY)
-		mfn &= ~FOREIGN_FRAME_BIT;
+		mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);

 	return mfn;
 }
@@ -73,25 +81,44 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
 static inline unsigned long mfn_to_pfn(unsigned long mfn)
 {
 	unsigned long pfn;
+	int ret = 0;

 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return mfn;

+	if (unlikely((mfn >> machine_to_phys_order) != 0)) {
+		pfn = ~0;
+		goto try_override;
+	}
 	pfn = 0;
 	/*
 	 * The array access can fail (e.g., device space beyond end of RAM).
 	 * In such cases it doesn't matter what we return (we return garbage),
 	 * but we must handle the fault without crashing!
 	 */
-	__get_user(pfn, &machine_to_phys_mapping[mfn]);
+	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+try_override:
+	/* ret might be < 0 if there are no entries in the m2p for mfn */
+	if (ret < 0)
+		pfn = ~0;
+	else if (get_phys_to_machine(pfn) != mfn)
+		/*
+		 * If this appears to be a foreign mfn (because the pfn
+		 * doesn't map back to the mfn), then check the local override
+		 * table to see if there's a better pfn to use.
+		 *
+		 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
+		 */
+		pfn = m2p_find_override_pfn(mfn, ~0);

-	/*
-	 * If this appears to be a foreign mfn (because the pfn
-	 * doesn't map back to the mfn), then check the local override
-	 * table to see if there's a better pfn to use.
+	/*
+	 * pfn is ~0 if there are no entries in the m2p for mfn or if the
+	 * entry doesn't map back to the mfn and m2p_override doesn't have a
+	 * valid entry for it.
 	 */
-	if (get_phys_to_machine(pfn) != mfn)
-		pfn = m2p_find_override_pfn(mfn, pfn);
+	if (pfn == ~0 &&
+			get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn))
+		pfn = mfn;

 	return pfn;
 }
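The function's logic hinges on a round-trip test: an M2P lookup is trusted only if the P2M entry for the resulting pfn maps back to the same mfn; otherwise the page is foreign and the per-domain override list is consulted, and failing that an identity-mapped frame is accepted as-is. A toy model of the round-trip check, with plain arrays standing in for the real M2P/P2M tables:

#include <stdio.h>

#define TBL	16
#define INVALID	(~0UL)

static unsigned long m2p[TBL];	/* machine -> pseudo-physical */
static unsigned long p2m[TBL];	/* pseudo-physical -> machine */

/* Trust the m2p entry only if the p2m maps the pfn back to mfn. */
static unsigned long lookup_pfn(unsigned long mfn)
{
	unsigned long pfn = m2p[mfn];

	if (pfn >= TBL || p2m[pfn] != mfn)
		return INVALID;	/* foreign: would try the override list */
	return pfn;
}

int main(void)
{
	m2p[3] = 7; p2m[7] = 3;		/* a frame this domain really owns */
	m2p[4] = 9; p2m[9] = 12;	/* foreign: round trip fails */

	printf("mfn 3 -> pfn %lu\n", lookup_pfn(3));	/* 7 */
	printf("mfn 4 -> pfn %lu\n", lookup_pfn(4));	/* ~0 */
	return 0;
}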
arch/x86/include/asm/xen/pci.h

@@ -27,16 +27,16 @@ static inline void __init xen_setup_pirqs(void)
  * its own functions.
  */
 struct xen_pci_frontend_ops {
-	int (*enable_msi)(struct pci_dev *dev, int **vectors);
+	int (*enable_msi)(struct pci_dev *dev, int vectors[]);
	void (*disable_msi)(struct pci_dev *dev);
-	int (*enable_msix)(struct pci_dev *dev, int **vectors, int nvec);
+	int (*enable_msix)(struct pci_dev *dev, int vectors[], int nvec);
	void (*disable_msix)(struct pci_dev *dev);
 };

 extern struct xen_pci_frontend_ops *xen_pci_frontend;

 static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
-					      int **vectors)
+					      int vectors[])
 {
	if (xen_pci_frontend && xen_pci_frontend->enable_msi)
		return xen_pci_frontend->enable_msi(dev, vectors);

@@ -48,7 +48,7 @@ static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
		xen_pci_frontend->disable_msi(dev);
 }
 static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
-					       int **vectors, int nvec)
+					       int vectors[], int nvec)
 {
	if (xen_pci_frontend && xen_pci_frontend->enable_msix)
		return xen_pci_frontend->enable_msix(dev, vectors, nvec);
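The switch from int **vectors to int vectors[] makes the caller own the storage: it passes a flat array and the backend fills in one interrupt vector per enabled MSI/MSI-X entry, instead of handing back a freshly allocated pointer. A sketch of the resulting calling pattern (the device struct, fill function, and vector values are invented for illustration):

#include <stdio.h>

struct fake_dev { int id; };

/* Caller-owned array pattern: write nvec vector numbers into storage
 * the caller provides, mirroring the new enable_msix signature. */
static int fill_vectors(struct fake_dev *dev, int vectors[], int nvec)
{
	for (int i = 0; i < nvec; i++)
		vectors[i] = 32 + i;	/* pretend-allocated IRQ vectors */
	return 0;
}

int main(void)
{
	struct fake_dev dev = { .id = 1 };
	int vectors[4];			/* caller owns the storage */

	if (fill_vectors(&dev, vectors, 4) == 0)
		printf("first vector: %d\n", vectors[0]);
	return 0;
}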